From afd5894175715cc06d4c966b036cef94bd9c976f Mon Sep 17 00:00:00 2001 From: Cody Littley <56973212+cody-littley@users.noreply.github.com> Date: Tue, 19 Nov 2024 14:58:28 -0600 Subject: [PATCH 1/8] Relay rate limits (#906) Signed-off-by: Cody Littley --- relay/cmd/config.go | 33 ++- relay/cmd/flags/flags.go | 119 ++++++++ relay/limiter/blob_rate_limiter.go | 102 +++++++ relay/limiter/blob_rate_limiter_test.go | 163 +++++++++++ relay/limiter/chunk_rate_limiter.go | 151 ++++++++++ relay/limiter/chunk_rate_limiter_test.go | 335 +++++++++++++++++++++++ relay/limiter/config.go | 65 +++++ relay/limiter/limiter_test.go | 86 ++++++ relay/metadata_provider.go | 14 +- relay/relay_test_utils.go | 2 +- relay/server.go | 161 +++++++++-- relay/server_test.go | 47 +++- 12 files changed, 1224 insertions(+), 54 deletions(-) create mode 100644 relay/limiter/blob_rate_limiter.go create mode 100644 relay/limiter/blob_rate_limiter_test.go create mode 100644 relay/limiter/chunk_rate_limiter.go create mode 100644 relay/limiter/chunk_rate_limiter_test.go create mode 100644 relay/limiter/config.go create mode 100644 relay/limiter/limiter_test.go diff --git a/relay/cmd/config.go b/relay/cmd/config.go index c7b8b46fcc..bb7566f5a1 100644 --- a/relay/cmd/config.go +++ b/relay/cmd/config.go @@ -2,6 +2,7 @@ package main import ( "fmt" + "github.com/Layr-Labs/eigenda/relay/limiter" "github.com/Layr-Labs/eigenda/common" "github.com/Layr-Labs/eigenda/common/aws" @@ -12,21 +13,6 @@ import ( ) // Config is the configuration for the relay Server. -// -// Environment variables are mapped into this struct by taking the name of the field in this struct, -// converting to upper case, and prepending "RELAY_". For example, "BlobCacheSize" can be set using the -// environment variable "RELAY_BLOBCACHESIZE". -// -// For nested structs, add the name of the struct variable before the field name, separated by an underscore. 
-// For example, "Log.Format" can be set using the environment variable "RELAY_LOG_FORMAT". -// -// Slice values can be set using a comma-separated list. For example, "RelayIDs" can be set using the environment -// variable "RELAY_RELAYIDS='1,2,3,4'". -// -// It is also possible to set the configuration using a configuration file. The path to the configuration file should -// be passed as the first argument to the relay binary, e.g. "bin/relay config.yaml". The structure of the config -// file should mirror the structure of this struct, with keys in the config file matching the field names -// of this struct. type Config struct { // Log is the configuration for the logger. Default is common.DefaultLoggerConfig(). @@ -70,6 +56,23 @@ func NewConfig(ctx *cli.Context) (Config, error) { BlobMaxConcurrency: ctx.Int(flags.BlobMaxConcurrencyFlag.Name), ChunkCacheSize: ctx.Int(flags.ChunkCacheSizeFlag.Name), ChunkMaxConcurrency: ctx.Int(flags.ChunkMaxConcurrencyFlag.Name), + RateLimits: limiter.Config{ + MaxGetBlobOpsPerSecond: ctx.Float64(flags.MaxGetBlobOpsPerSecondFlag.Name), + GetBlobOpsBurstiness: ctx.Int(flags.GetBlobOpsBurstinessFlag.Name), + MaxGetBlobBytesPerSecond: ctx.Float64(flags.MaxGetBlobBytesPerSecondFlag.Name), + GetBlobBytesBurstiness: ctx.Int(flags.GetBlobBytesBurstinessFlag.Name), + MaxConcurrentGetBlobOps: ctx.Int(flags.MaxConcurrentGetBlobOpsFlag.Name), + MaxGetChunkOpsPerSecond: ctx.Float64(flags.MaxGetChunkOpsPerSecondFlag.Name), + GetChunkOpsBurstiness: ctx.Int(flags.GetChunkOpsBurstinessFlag.Name), + MaxGetChunkBytesPerSecond: ctx.Float64(flags.MaxGetChunkBytesPerSecondFlag.Name), + GetChunkBytesBurstiness: ctx.Int(flags.GetChunkBytesBurstinessFlag.Name), + MaxConcurrentGetChunkOps: ctx.Int(flags.MaxConcurrentGetChunkOpsFlag.Name), + MaxGetChunkOpsPerSecondClient: ctx.Float64(flags.MaxGetChunkOpsPerSecondClientFlag.Name), + GetChunkOpsBurstinessClient: ctx.Int(flags.GetChunkOpsBurstinessClientFlag.Name), + MaxGetChunkBytesPerSecondClient: 
ctx.Float64(flags.MaxGetChunkBytesPerSecondClientFlag.Name), + GetChunkBytesBurstinessClient: ctx.Int(flags.GetChunkBytesBurstinessClientFlag.Name), + MaxConcurrentGetChunkOpsClient: ctx.Int(flags.MaxConcurrentGetChunkOpsClientFlag.Name), + }, }, } for i, id := range relayIDs { diff --git a/relay/cmd/flags/flags.go b/relay/cmd/flags/flags.go index 63e63369e5..9abd673566 100644 --- a/relay/cmd/flags/flags.go +++ b/relay/cmd/flags/flags.go @@ -85,6 +85,110 @@ var ( EnvVar: common.PrefixEnvVar(envVarPrefix, "CHUNK_MAX_CONCURRENCY"), Value: 32, } + MaxGetBlobOpsPerSecondFlag = cli.Float64Flag{ + Name: common.PrefixFlag(FlagPrefix, "max-get-blob-ops-per-second"), + Usage: "Max number of GetBlob operations per second", + Required: false, + EnvVar: common.PrefixEnvVar(envVarPrefix, "MAX_GET_BLOB_OPS_PER_SECOND"), + Value: 1024, + } + GetBlobOpsBurstinessFlag = cli.IntFlag{ + Name: common.PrefixFlag(FlagPrefix, "get-blob-ops-burstiness"), + Usage: "Burstiness of the GetBlob rate limiter", + Required: false, + EnvVar: common.PrefixEnvVar(envVarPrefix, "GET_BLOB_OPS_BURSTINESS"), + Value: 1024, + } + MaxGetBlobBytesPerSecondFlag = cli.Float64Flag{ + Name: common.PrefixFlag(FlagPrefix, "max-get-blob-bytes-per-second"), + Usage: "Max bandwidth for GetBlob operations in bytes per second", + Required: false, + EnvVar: common.PrefixEnvVar(envVarPrefix, "MAX_GET_BLOB_BYTES_PER_SECOND"), + Value: 20 * 1024 * 1024, + } + GetBlobBytesBurstinessFlag = cli.IntFlag{ + Name: common.PrefixFlag(FlagPrefix, "get-blob-bytes-burstiness"), + Usage: "Burstiness of the GetBlob bandwidth rate limiter", + Required: false, + EnvVar: common.PrefixEnvVar(envVarPrefix, "GET_BLOB_BYTES_BURSTINESS"), + Value: 20 * 1024 * 1024, + } + MaxConcurrentGetBlobOpsFlag = cli.IntFlag{ + Name: common.PrefixFlag(FlagPrefix, "max-concurrent-get-blob-ops"), + Usage: "Max number of concurrent GetBlob operations", + Required: false, + EnvVar: common.PrefixEnvVar(envVarPrefix, "MAX_CONCURRENT_GET_BLOB_OPS"), + Value: 
1024, + } + MaxGetChunkOpsPerSecondFlag = cli.Float64Flag{ + Name: common.PrefixFlag(FlagPrefix, "max-get-chunk-ops-per-second"), + Usage: "Max number of GetChunk operations per second", + Required: false, + EnvVar: common.PrefixEnvVar(envVarPrefix, "MAX_GET_CHUNK_OPS_PER_SECOND"), + Value: 1024, + } + GetChunkOpsBurstinessFlag = cli.IntFlag{ + Name: common.PrefixFlag(FlagPrefix, "get-chunk-ops-burstiness"), + Usage: "Burstiness of the GetChunk rate limiter", + Required: false, + EnvVar: common.PrefixEnvVar(envVarPrefix, "GET_CHUNK_OPS_BURSTINESS"), + Value: 1024, + } + MaxGetChunkBytesPerSecondFlag = cli.Float64Flag{ + Name: common.PrefixFlag(FlagPrefix, "max-get-chunk-bytes-per-second"), + Usage: "Max bandwidth for GetChunk operations in bytes per second", + Required: false, + EnvVar: common.PrefixEnvVar(envVarPrefix, "MAX_GET_CHUNK_BYTES_PER_SECOND"), + Value: 20 * 1024 * 1024, + } + GetChunkBytesBurstinessFlag = cli.IntFlag{ + Name: common.PrefixFlag(FlagPrefix, "get-chunk-bytes-burstiness"), + Usage: "Burstiness of the GetChunk bandwidth rate limiter", + Required: false, + EnvVar: common.PrefixEnvVar(envVarPrefix, "GET_CHUNK_BYTES_BURSTINESS"), + Value: 20 * 1024 * 1024, + } + MaxConcurrentGetChunkOpsFlag = cli.IntFlag{ + Name: common.PrefixFlag(FlagPrefix, "max-concurrent-get-chunk-ops"), + Usage: "Max number of concurrent GetChunk operations", + Required: false, + EnvVar: common.PrefixEnvVar(envVarPrefix, "MAX_CONCURRENT_GET_CHUNK_OPS"), + Value: 1024, + } + MaxGetChunkOpsPerSecondClientFlag = cli.Float64Flag{ + Name: common.PrefixFlag(FlagPrefix, "max-get-chunk-ops-per-second-client"), + Usage: "Max number of GetChunk operations per second per client", + Required: false, + EnvVar: common.PrefixEnvVar(envVarPrefix, "MAX_GET_CHUNK_OPS_PER_SECOND_CLIENT"), + Value: 8, + } + GetChunkOpsBurstinessClientFlag = cli.IntFlag{ + Name: common.PrefixFlag(FlagPrefix, "get-chunk-ops-burstiness-client"), + Usage: "Burstiness of the GetChunk rate limiter per client", + 
Required: false, + EnvVar: common.PrefixEnvVar(envVarPrefix, "GET_CHUNK_OPS_BURSTINESS_CLIENT"), + Value: 8, + } + MaxGetChunkBytesPerSecondClientFlag = cli.Float64Flag{ + Name: common.PrefixFlag(FlagPrefix, "max-get-chunk-bytes-per-second-client"), + Usage: "Max bandwidth for GetChunk operations in bytes per second per client", + Required: false, + EnvVar: common.PrefixEnvVar(envVarPrefix, "MAX_GET_CHUNK_BYTES_PER_SECOND_CLIENT"), + Value: 2 * 1024 * 1024, + } + GetChunkBytesBurstinessClientFlag = cli.IntFlag{ + Name: common.PrefixFlag(FlagPrefix, "get-chunk-bytes-burstiness-client"), + Usage: "Burstiness of the GetChunk bandwidth rate limiter per client", + Required: false, + EnvVar: common.PrefixEnvVar(envVarPrefix, "GET_CHUNK_BYTES_BURSTINESS_CLIENT"), + } + MaxConcurrentGetChunkOpsClientFlag = cli.IntFlag{ + Name: common.PrefixFlag(FlagPrefix, "max-concurrent-get-chunk-ops-client"), + Usage: "Max number of concurrent GetChunk operations per client", + Required: false, + EnvVar: common.PrefixEnvVar(envVarPrefix, "MAX_CONCURRENT_GET_CHUNK_OPS_CLIENT"), + Value: 1, + } ) var requiredFlags = []cli.Flag{ @@ -102,6 +206,21 @@ var optionalFlags = []cli.Flag{ BlobMaxConcurrencyFlag, ChunkCacheSizeFlag, ChunkMaxConcurrencyFlag, + MaxGetBlobOpsPerSecondFlag, + GetBlobOpsBurstinessFlag, + MaxGetBlobBytesPerSecondFlag, + GetBlobBytesBurstinessFlag, + MaxConcurrentGetBlobOpsFlag, + MaxGetChunkOpsPerSecondFlag, + GetChunkOpsBurstinessFlag, + MaxGetChunkBytesPerSecondFlag, + GetChunkBytesBurstinessFlag, + MaxConcurrentGetChunkOpsFlag, + MaxGetChunkOpsPerSecondClientFlag, + GetChunkOpsBurstinessClientFlag, + MaxGetChunkBytesPerSecondClientFlag, + GetChunkBytesBurstinessClientFlag, + MaxConcurrentGetChunkOpsClientFlag, } var Flags []cli.Flag diff --git a/relay/limiter/blob_rate_limiter.go b/relay/limiter/blob_rate_limiter.go new file mode 100644 index 0000000000..0ac260cba8 --- /dev/null +++ b/relay/limiter/blob_rate_limiter.go @@ -0,0 +1,102 @@ +package limiter + +import ( + 
"fmt" + "golang.org/x/time/rate" + "sync" + "time" +) + +// BlobRateLimiter enforces rate limits on GetBlob operations. +type BlobRateLimiter struct { + + // config is the rate limit configuration. + config *Config + + // opLimiter enforces rate limits on the maximum rate of GetBlob operations + opLimiter *rate.Limiter + + // bandwidthLimiter enforces rate limits on the maximum bandwidth consumed by GetBlob operations. Only the size + // of the blob data is considered, not the size of the entire response. + bandwidthLimiter *rate.Limiter + + // operationsInFlight is the number of GetBlob operations currently in flight. + operationsInFlight int + + // this lock is used to provide thread safety + lock sync.Mutex +} + +// NewBlobRateLimiter creates a new BlobRateLimiter. +func NewBlobRateLimiter(config *Config) *BlobRateLimiter { + globalGetBlobOpLimiter := rate.NewLimiter( + rate.Limit(config.MaxGetBlobOpsPerSecond), + config.GetBlobOpsBurstiness) + + globalGetBlobBandwidthLimiter := rate.NewLimiter( + rate.Limit(config.MaxGetBlobBytesPerSecond), + config.GetBlobBytesBurstiness) + + return &BlobRateLimiter{ + config: config, + opLimiter: globalGetBlobOpLimiter, + bandwidthLimiter: globalGetBlobBandwidthLimiter, + } +} + +// BeginGetBlobOperation should be called when a GetBlob operation is about to begin. If it returns an error, +// the operation should not be performed. If it does not return an error, FinishGetBlobOperation should be +// called when the operation completes. +func (l *BlobRateLimiter) BeginGetBlobOperation(now time.Time) error { + if l == nil { + // If the rate limiter is nil, do not enforce rate limits. 
+ return nil + } + + l.lock.Lock() + defer l.lock.Unlock() + + if l.operationsInFlight >= l.config.MaxConcurrentGetBlobOps { + return fmt.Errorf("global concurrent request limit exceeded for getBlob operations, try again later") + } + if l.opLimiter.TokensAt(now) < 1 { + return fmt.Errorf("global rate limit exceeded for getBlob operations, try again later") + } + + l.operationsInFlight++ + l.opLimiter.AllowN(now, 1) + + return nil +} + +// FinishGetBlobOperation should be called exactly once for each time BeginGetBlobOperation is called and +// returns nil. +func (l *BlobRateLimiter) FinishGetBlobOperation() { + if l == nil { + // If the rate limiter is nil, do not enforce rate limits. + return + } + + l.lock.Lock() + defer l.lock.Unlock() + + l.operationsInFlight-- +} + +// RequestGetBlobBandwidth should be called when a GetBlob is about to start downloading blob data +// from S3. It returns an error if there is insufficient bandwidth available. If it returns nil, the +// operation should proceed. +func (l *BlobRateLimiter) RequestGetBlobBandwidth(now time.Time, bytes uint32) error { + if l == nil { + // If the rate limiter is nil, do not enforce rate limits. 
+ return nil + } + + // no locking needed, the only thing we touch here is the bandwidthLimiter, which is inherently thread-safe + + allowed := l.bandwidthLimiter.AllowN(now, int(bytes)) + if !allowed { + return fmt.Errorf("global rate limit exceeded for getBlob bandwidth, try again later") + } + return nil +} diff --git a/relay/limiter/blob_rate_limiter_test.go b/relay/limiter/blob_rate_limiter_test.go new file mode 100644 index 0000000000..2966b6bea0 --- /dev/null +++ b/relay/limiter/blob_rate_limiter_test.go @@ -0,0 +1,163 @@ +package limiter + +import ( + tu "github.com/Layr-Labs/eigenda/common/testutils" + "github.com/stretchr/testify/require" + "golang.org/x/exp/rand" + "testing" + "time" +) + +func defaultConfig() *Config { + return &Config{ + MaxGetBlobOpsPerSecond: 1024, + GetBlobOpsBurstiness: 1024, + MaxGetBlobBytesPerSecond: 20 * 1024 * 1024, + GetBlobBytesBurstiness: 20 * 1024 * 1024, + MaxConcurrentGetBlobOps: 1024, + MaxGetChunkOpsPerSecond: 1024, + GetChunkOpsBurstiness: 1024, + MaxGetChunkBytesPerSecond: 20 * 1024 * 1024, + GetChunkBytesBurstiness: 20 * 1024 * 1024, + MaxConcurrentGetChunkOps: 1024, + MaxGetChunkOpsPerSecondClient: 8, + GetChunkOpsBurstinessClient: 8, + MaxGetChunkBytesPerSecondClient: 2 * 1024 * 1024, + GetChunkBytesBurstinessClient: 2 * 1024 * 1024, + MaxConcurrentGetChunkOpsClient: 1, + } +} + +func TestConcurrentBlobOperations(t *testing.T) { + tu.InitializeRandom() + + concurrencyLimit := 1 + rand.Intn(10) + + config := defaultConfig() + config.MaxConcurrentGetBlobOps = concurrencyLimit + // Make the burstiness limit high enough that we won't be rate limited + config.GetBlobOpsBurstiness = concurrencyLimit * 100 + + limiter := NewBlobRateLimiter(config) + + // time starts at current time, but advances manually afterward + now := time.Now() + + // We should be able to start this many operations concurrently + for i := 0; i < concurrencyLimit; i++ { + err := limiter.BeginGetBlobOperation(now) + require.NoError(t, err) + } + + // 
Starting one more operation should fail due to the concurrency limit + err := limiter.BeginGetBlobOperation(now) + require.Error(t, err) + + // Finish an operation. This should permit exactly one more operation to start + limiter.FinishGetBlobOperation() + err = limiter.BeginGetBlobOperation(now) + require.NoError(t, err) + err = limiter.BeginGetBlobOperation(now) + require.Error(t, err) +} + +func TestGetBlobOpRateLimit(t *testing.T) { + tu.InitializeRandom() + + config := defaultConfig() + config.MaxGetBlobOpsPerSecond = float64(2 + rand.Intn(10)) + config.GetBlobOpsBurstiness = int(config.MaxGetBlobOpsPerSecond) + rand.Intn(10) + config.MaxConcurrentGetBlobOps = 1 + + limiter := NewBlobRateLimiter(config) + + // time starts at current time, but advances manually afterward + now := time.Now() + + // Without advancing time, we should be able to perform a number of operations equal to the burstiness limit. + for i := 0; i < config.GetBlobOpsBurstiness; i++ { + err := limiter.BeginGetBlobOperation(now) + require.NoError(t, err) + limiter.FinishGetBlobOperation() + } + + // We are now at the rate limit, and should not be able to start another operation. + err := limiter.BeginGetBlobOperation(now) + require.Error(t, err) + + // Advance time by one second. We should gain a number of tokens equal to the rate limit. + now = now.Add(time.Second) + for i := 0; i < int(config.MaxGetBlobOpsPerSecond); i++ { + err = limiter.BeginGetBlobOperation(now) + require.NoError(t, err) + limiter.FinishGetBlobOperation() + } + + // We have once again hit the rate limit. We should not be able to start another operation. + err = limiter.BeginGetBlobOperation(now) + require.Error(t, err) + + // Advance time by another second. We should gain another number of tokens equal to the rate limit. + // Intentionally do not finish the next operation. We are attempting to get a failure by exceeding + // the max concurrent operations limit. 
+ now = now.Add(time.Second) + err = limiter.BeginGetBlobOperation(now) + require.NoError(t, err) + + // This operation should fail since we have limited concurrent operations to 1. It should not count + // against the rate limit. + err = limiter.BeginGetBlobOperation(now) + require.Error(t, err) + + // "finish" the prior operation. Verify that we have all expected tokens available. + limiter.FinishGetBlobOperation() + for i := 0; i < int(config.MaxGetBlobOpsPerSecond)-1; i++ { + err = limiter.BeginGetBlobOperation(now) + require.NoError(t, err) + limiter.FinishGetBlobOperation() + } + + // We should now be at the rate limit. We should not be able to start another operation. + err = limiter.BeginGetBlobOperation(now) + require.Error(t, err) +} + +func TestGetBlobBandwidthLimit(t *testing.T) { + tu.InitializeRandom() + + config := defaultConfig() + config.MaxGetBlobBytesPerSecond = float64(1024 + rand.Intn(1024*1024)) + config.GetBlobBytesBurstiness = int(config.MaxGetBlobBytesPerSecond) + rand.Intn(1024*1024) + + limiter := NewBlobRateLimiter(config) + + // time starts at current time, but advances manually afterward + now := time.Now() + + // Without advancing time, we should be able to utilize a number of bytes equal to the burstiness limit. + bytesRemaining := config.GetBlobBytesBurstiness + for bytesRemaining > 0 { + bytesToRequest := 1 + rand.Intn(bytesRemaining) + err := limiter.RequestGetBlobBandwidth(now, uint32(bytesToRequest)) + require.NoError(t, err) + bytesRemaining -= bytesToRequest + } + + // Requesting one more byte should fail due to the bandwidth limit + err := limiter.RequestGetBlobBandwidth(now, 1) + require.Error(t, err) + + // Advance time by one second. We should gain a number of tokens equal to the rate limit. 
+ now = now.Add(time.Second) + bytesRemaining = int(config.MaxGetBlobBytesPerSecond) + for bytesRemaining > 0 { + bytesToRequest := 1 + rand.Intn(bytesRemaining) + err = limiter.RequestGetBlobBandwidth(now, uint32(bytesToRequest)) + require.NoError(t, err) + bytesRemaining -= bytesToRequest + } + + // Requesting one more byte should fail due to the bandwidth limit + err = limiter.RequestGetBlobBandwidth(now, 1) + require.Error(t, err) +} diff --git a/relay/limiter/chunk_rate_limiter.go b/relay/limiter/chunk_rate_limiter.go new file mode 100644 index 0000000000..fe899e5b17 --- /dev/null +++ b/relay/limiter/chunk_rate_limiter.go @@ -0,0 +1,151 @@ +package limiter + +import ( + "fmt" + "golang.org/x/time/rate" + "sync" + "time" +) + +// ChunkRateLimiter enforces rate limits on GetChunk operations. +type ChunkRateLimiter struct { + + // config is the rate limit configuration. + config *Config + + // global limiters + + // globalOpLimiter enforces global rate limits on the maximum rate of GetChunk operations + globalOpLimiter *rate.Limiter + + // globalBandwidthLimiter enforces global rate limits on the maximum bandwidth consumed by GetChunk operations. + globalBandwidthLimiter *rate.Limiter + + // globalOperationsInFlight is the number of GetChunk operations currently in flight. + globalOperationsInFlight int + + // per-client limiters + + // Note: in its current form, these expose a DOS vector, since an attacker can create many clients IDs + // and force these maps to become arbitrarily large. This will be remedied when authentication + // is implemented, as only authentication will happen prior to rate limiting. + + // perClientOpLimiter enforces per-client rate limits on the maximum rate of GetChunk operations + perClientOpLimiter map[string]*rate.Limiter + + // perClientBandwidthLimiter enforces per-client rate limits on the maximum bandwidth consumed by + // GetChunk operations. 
+ perClientBandwidthLimiter map[string]*rate.Limiter + + // perClientOperationsInFlight is the number of GetChunk operations currently in flight for each client. + perClientOperationsInFlight map[string]int + + // this lock is used to provide thread safety + lock sync.Mutex +} + +// NewChunkRateLimiter creates a new ChunkRateLimiter. +func NewChunkRateLimiter(config *Config) *ChunkRateLimiter { + + globalOpLimiter := rate.NewLimiter(rate.Limit( + config.MaxGetChunkOpsPerSecond), + config.GetChunkOpsBurstiness) + + globalBandwidthLimiter := rate.NewLimiter(rate.Limit( + config.MaxGetChunkBytesPerSecond), + config.GetChunkBytesBurstiness) + + return &ChunkRateLimiter{ + config: config, + globalOpLimiter: globalOpLimiter, + globalBandwidthLimiter: globalBandwidthLimiter, + perClientOpLimiter: make(map[string]*rate.Limiter), + perClientBandwidthLimiter: make(map[string]*rate.Limiter), + perClientOperationsInFlight: make(map[string]int), + } +} + +// BeginGetChunkOperation should be called when a GetChunk operation is about to begin. If it returns an error, +// the operation should not be performed. If it does not return an error, FinishGetChunkOperation should be +// called when the operation completes. +func (l *ChunkRateLimiter) BeginGetChunkOperation( + now time.Time, + requesterID string) error { + if l == nil { + // If the rate limiter is nil, do not enforce rate limits. + return nil + } + + l.lock.Lock() + defer l.lock.Unlock() + + _, ok := l.perClientOperationsInFlight[requesterID] + if !ok { + // This is the first time we've seen this client ID. 
+ l.perClientOperationsInFlight[requesterID] = 0 + + l.perClientOpLimiter[requesterID] = rate.NewLimiter( + rate.Limit(l.config.MaxGetChunkOpsPerSecondClient), + l.config.GetChunkOpsBurstinessClient) + + l.perClientBandwidthLimiter[requesterID] = rate.NewLimiter( + rate.Limit(l.config.MaxGetChunkBytesPerSecondClient), + l.config.GetChunkBytesBurstinessClient) + } + + if l.globalOperationsInFlight >= l.config.MaxConcurrentGetChunkOps { + return fmt.Errorf("global concurrent request limit exceeded for GetChunks operations, try again later") + } + if l.globalOpLimiter.TokensAt(now) < 1 { + return fmt.Errorf("global rate limit exceeded for GetChunks operations, try again later") + } + if l.perClientOperationsInFlight[requesterID] >= l.config.MaxConcurrentGetChunkOpsClient { + return fmt.Errorf("client concurrent request limit exceeded for GetChunks") + } + if l.perClientOpLimiter[requesterID].TokensAt(now) < 1 { + return fmt.Errorf("client rate limit exceeded for GetChunks, try again later") + } + + l.globalOperationsInFlight++ + l.perClientOperationsInFlight[requesterID]++ + l.globalOpLimiter.AllowN(now, 1) + l.perClientOpLimiter[requesterID].AllowN(now, 1) + + return nil +} + +// FinishGetChunkOperation should be called when a GetChunk operation completes. +func (l *ChunkRateLimiter) FinishGetChunkOperation(requesterID string) { + if l == nil { + return + } + + l.lock.Lock() + defer l.lock.Unlock() + + l.globalOperationsInFlight-- + l.perClientOperationsInFlight[requesterID]-- +} + +// RequestGetChunkBandwidth should be called when a GetChunk is about to start downloading chunk data. +func (l *ChunkRateLimiter) RequestGetChunkBandwidth(now time.Time, requesterID string, bytes int) error { + if l == nil { + // If the rate limiter is nil, do not enforce rate limits. 
+ return nil + } + + // no lock needed here, as the bandwidth limiters themselves are thread-safe + + allowed := l.globalBandwidthLimiter.AllowN(now, bytes) + if !allowed { + return fmt.Errorf("global rate limit exceeded for GetChunk bandwidth, try again later") + } + + allowed = l.perClientBandwidthLimiter[requesterID].AllowN(now, bytes) + if !allowed { + l.globalBandwidthLimiter.AllowN(now, -bytes) + return fmt.Errorf("client rate limit exceeded for GetChunk bandwidth, try again later") + } + + return nil +} diff --git a/relay/limiter/chunk_rate_limiter_test.go b/relay/limiter/chunk_rate_limiter_test.go new file mode 100644 index 0000000000..59399ca17f --- /dev/null +++ b/relay/limiter/chunk_rate_limiter_test.go @@ -0,0 +1,335 @@ +package limiter + +import ( + tu "github.com/Layr-Labs/eigenda/common/testutils" + "github.com/stretchr/testify/require" + "golang.org/x/exp/rand" + "math" + "testing" + "time" +) + +func TestConcurrentGetChunksOperations(t *testing.T) { + tu.InitializeRandom() + + concurrencyLimit := 1 + rand.Intn(10) + + config := defaultConfig() + config.MaxConcurrentGetChunkOps = concurrencyLimit + config.MaxConcurrentGetChunkOpsClient = math.MaxInt32 + config.GetChunkOpsBurstiness = math.MaxInt32 + config.GetChunkOpsBurstinessClient = math.MaxInt32 + + userID := tu.RandomString(64) + + limiter := NewChunkRateLimiter(config) + + // time starts at current time, but advances manually afterward + now := time.Now() + + // We should be able to start this many operations concurrently + for i := 0; i < concurrencyLimit; i++ { + err := limiter.BeginGetChunkOperation(now, userID) + require.NoError(t, err) + } + + // Starting one more operation should fail due to the concurrency limit + err := limiter.BeginGetChunkOperation(now, userID) + require.Error(t, err) + + // Finish an operation. 
This should permit exactly one more operation to start + limiter.FinishGetChunkOperation(userID) + err = limiter.BeginGetChunkOperation(now, userID) + require.NoError(t, err) + err = limiter.BeginGetChunkOperation(now, userID) + require.Error(t, err) +} + +func TestGetChunksRateLimit(t *testing.T) { + tu.InitializeRandom() + + config := defaultConfig() + config.MaxGetChunkOpsPerSecond = float64(2 + rand.Intn(10)) + config.GetChunkOpsBurstiness = int(config.MaxGetChunkOpsPerSecond) + rand.Intn(10) + config.GetChunkOpsBurstinessClient = math.MaxInt32 + config.MaxConcurrentGetChunkOps = 1 + + userID := tu.RandomString(64) + + limiter := NewChunkRateLimiter(config) + + // time starts at current time, but advances manually afterward + now := time.Now() + + // Without advancing time, we should be able to perform a number of operations equal to the burstiness limit. + for i := 0; i < config.GetChunkOpsBurstiness; i++ { + err := limiter.BeginGetChunkOperation(now, userID) + require.NoError(t, err) + limiter.FinishGetChunkOperation(userID) + } + + // We are now at the rate limit, and should not be able to start another operation. + err := limiter.BeginGetChunkOperation(now, userID) + require.Error(t, err) + + // Advance time by one second. We should now be able to perform a number of operations equal to the rate limit. + now = now.Add(time.Second) + for i := 0; i < int(config.MaxGetChunkOpsPerSecond); i++ { + err = limiter.BeginGetChunkOperation(now, userID) + require.NoError(t, err) + limiter.FinishGetChunkOperation(userID) + } + + // We are now at the rate limit, and should not be able to start another operation. + err = limiter.BeginGetChunkOperation(now, userID) + require.Error(t, err) + + // Advance time by one second. + // Intentionally do not finish the operation. We are attempting to see what happens when an operation fails + // due to the limit on parallel operations. 
+ now = now.Add(time.Second) + err = limiter.BeginGetChunkOperation(now, userID) + require.NoError(t, err) + + // This operation will fail due to the concurrency limit. It should not affect the rate limit. + err = limiter.BeginGetChunkOperation(now, userID) + require.Error(t, err) + + // Finish the operation that was started in the previous second. This should permit the next operation to start. + limiter.FinishGetChunkOperation(userID) + + // Verify that we have the expected number of available tokens. + for i := 0; i < int(config.MaxGetChunkOpsPerSecond)-1; i++ { + err = limiter.BeginGetChunkOperation(now, userID) + require.NoError(t, err) + limiter.FinishGetChunkOperation(userID) + } + + // We are now at the rate limit, and should not be able to start another operation. + err = limiter.BeginGetChunkOperation(now, userID) + require.Error(t, err) +} + +func TestGetChunksBandwidthLimit(t *testing.T) { + tu.InitializeRandom() + + config := defaultConfig() + config.MaxGetChunkBytesPerSecond = float64(1024 + rand.Intn(1024*1024)) + config.GetChunkBytesBurstiness = int(config.MaxGetBlobBytesPerSecond) + rand.Intn(1024*1024) + config.GetChunkBytesBurstinessClient = math.MaxInt32 + + userID := tu.RandomString(64) + + limiter := NewChunkRateLimiter(config) + + // time starts at current time, but advances manually afterward + now := time.Now() + + // "register" the user ID + err := limiter.BeginGetChunkOperation(now, userID) + require.NoError(t, err) + limiter.FinishGetChunkOperation(userID) + + // Without advancing time, we should be able to utilize a number of bytes equal to the burstiness limit. 
+ bytesRemaining := config.GetChunkBytesBurstiness + for bytesRemaining > 0 { + bytesToRequest := 1 + rand.Intn(bytesRemaining) + err = limiter.RequestGetChunkBandwidth(now, userID, bytesToRequest) + require.NoError(t, err) + bytesRemaining -= bytesToRequest + } + + // Requesting one more byte should fail due to the bandwidth limit + err = limiter.RequestGetChunkBandwidth(now, userID, 1) + require.Error(t, err) + + // Advance time by one second. We should gain a number of tokens equal to the rate limit. + now = now.Add(time.Second) + bytesRemaining = int(config.MaxGetChunkBytesPerSecond) + for bytesRemaining > 0 { + bytesToRequest := 1 + rand.Intn(bytesRemaining) + err = limiter.RequestGetChunkBandwidth(now, userID, bytesToRequest) + require.NoError(t, err) + bytesRemaining -= bytesToRequest + } + + // Requesting one more byte should fail due to the bandwidth limit + err = limiter.RequestGetChunkBandwidth(now, userID, 1) + require.Error(t, err) +} + +func TestPerClientConcurrencyLimit(t *testing.T) { + tu.InitializeRandom() + + config := defaultConfig() + config.MaxConcurrentGetChunkOpsClient = 1 + rand.Intn(10) + config.MaxConcurrentGetChunkOps = 2 * config.MaxConcurrentGetChunkOpsClient + config.GetChunkOpsBurstinessClient = math.MaxInt32 + config.GetChunkOpsBurstiness = math.MaxInt32 + + userID1 := tu.RandomString(64) + userID2 := tu.RandomString(64) + + limiter := NewChunkRateLimiter(config) + + // time starts at current time, but advances manually afterward + now := time.Now() + + // Start the maximum permitted number of operations for user 1 + for i := 0; i < config.MaxConcurrentGetChunkOpsClient; i++ { + err := limiter.BeginGetChunkOperation(now, userID1) + require.NoError(t, err) + } + + // Starting another operation for user 1 should fail due to the concurrency limit + err := limiter.BeginGetChunkOperation(now, userID1) + require.Error(t, err) + + // The failure to start the operation for client 1 should not use up any of the global concurrency slots. 
+ // To verify this, allow the maximum number of operations for client 2 to start. + for i := 0; i < config.MaxConcurrentGetChunkOpsClient; i++ { + err := limiter.BeginGetChunkOperation(now, userID2) + require.NoError(t, err) + } + + // Starting another operation for client 2 should fail due to the concurrency limit + err = limiter.BeginGetChunkOperation(now, userID2) + require.Error(t, err) + + // Ending an operation from client 2 should not affect the concurrency limit for client 1. + limiter.FinishGetChunkOperation(userID2) + err = limiter.BeginGetChunkOperation(now, userID1) + require.Error(t, err) + + // Ending an operation from client 1 should permit another operation for client 1 to start. + limiter.FinishGetChunkOperation(userID1) + err = limiter.BeginGetChunkOperation(now, userID1) + require.NoError(t, err) +} + +func TestOpLimitPerClient(t *testing.T) { + tu.InitializeRandom() + + config := defaultConfig() + config.MaxGetChunkOpsPerSecondClient = float64(2 + rand.Intn(10)) + config.GetChunkOpsBurstinessClient = int(config.MaxGetChunkOpsPerSecondClient) + rand.Intn(10) + config.GetChunkOpsBurstiness = math.MaxInt32 + + userID1 := tu.RandomString(64) + userID2 := tu.RandomString(64) + + limiter := NewChunkRateLimiter(config) + + // time starts at current time, but advances manually afterward + now := time.Now() + + // Without advancing time, we should be able to perform a number of operations equal to the burstiness limit. + for i := 0; i < config.GetChunkOpsBurstinessClient; i++ { + err := limiter.BeginGetChunkOperation(now, userID1) + require.NoError(t, err) + limiter.FinishGetChunkOperation(userID1) + } + + // We are now at the rate limit, and should not be able to start another operation. + err := limiter.BeginGetChunkOperation(now, userID1) + require.Error(t, err) + + // Client 2 should not be rate limited based on actions by client 1. 
+ for i := 0; i < config.GetChunkOpsBurstinessClient; i++ { + err := limiter.BeginGetChunkOperation(now, userID2) + require.NoError(t, err) + limiter.FinishGetChunkOperation(userID2) + } + + // Client 2 should now have exhausted its burstiness limit. + err = limiter.BeginGetChunkOperation(now, userID2) + require.Error(t, err) + + // Advancing time by a second should permit more operations. + now = now.Add(time.Second) + for i := 0; i < int(config.MaxGetChunkOpsPerSecondClient); i++ { + err = limiter.BeginGetChunkOperation(now, userID1) + require.NoError(t, err) + limiter.FinishGetChunkOperation(userID1) + err = limiter.BeginGetChunkOperation(now, userID2) + require.NoError(t, err) + limiter.FinishGetChunkOperation(userID2) + } + + // No more operations should be permitted for either client. + err = limiter.BeginGetChunkOperation(now, userID1) + require.Error(t, err) + err = limiter.BeginGetChunkOperation(now, userID2) + require.Error(t, err) +} + +func TestBandwidthLimitPerClient(t *testing.T) { + tu.InitializeRandom() + + config := defaultConfig() + config.MaxGetChunkBytesPerSecondClient = float64(1024 + rand.Intn(1024*1024)) + config.GetChunkBytesBurstinessClient = int(config.MaxGetBlobBytesPerSecond) + rand.Intn(1024*1024) + config.GetChunkBytesBurstiness = math.MaxInt32 + config.GetChunkOpsBurstiness = math.MaxInt32 + config.GetChunkOpsBurstinessClient = math.MaxInt32 + + userID1 := tu.RandomString(64) + userID2 := tu.RandomString(64) + + limiter := NewChunkRateLimiter(config) + + // time starts at current time, but advances manually afterward + now := time.Now() + + // "register" the user IDs + err := limiter.BeginGetChunkOperation(now, userID1) + require.NoError(t, err) + limiter.FinishGetChunkOperation(userID1) + err = limiter.BeginGetChunkOperation(now, userID2) + require.NoError(t, err) + limiter.FinishGetChunkOperation(userID2) + + // Request maximum possible bandwidth for client 1 + bytesRemaining := config.GetChunkBytesBurstinessClient + for 
bytesRemaining > 0 { + bytesToRequest := 1 + rand.Intn(bytesRemaining) + err = limiter.RequestGetChunkBandwidth(now, userID1, bytesToRequest) + require.NoError(t, err) + bytesRemaining -= bytesToRequest + } + + // Requesting one more byte should fail due to the bandwidth limit + err = limiter.RequestGetChunkBandwidth(now, userID1, 1) + require.Error(t, err) + + // User 2 should have its full bandwidth allowance available + bytesRemaining = config.GetChunkBytesBurstinessClient + for bytesRemaining > 0 { + bytesToRequest := 1 + rand.Intn(bytesRemaining) + err = limiter.RequestGetChunkBandwidth(now, userID2, bytesToRequest) + require.NoError(t, err) + bytesRemaining -= bytesToRequest + } + + // Requesting one more byte should fail due to the bandwidth limit + err = limiter.RequestGetChunkBandwidth(now, userID2, 1) + require.Error(t, err) + + // Advance time by one second. We should gain a number of tokens equal to the rate limit. + now = now.Add(time.Second) + bytesRemaining = int(config.MaxGetChunkBytesPerSecondClient) + for bytesRemaining > 0 { + bytesToRequest := 1 + rand.Intn(bytesRemaining) + err = limiter.RequestGetChunkBandwidth(now, userID1, bytesToRequest) + require.NoError(t, err) + err = limiter.RequestGetChunkBandwidth(now, userID2, bytesToRequest) + require.NoError(t, err) + bytesRemaining -= bytesToRequest + } + + // All bandwidth should now be exhausted for both clients + err = limiter.RequestGetChunkBandwidth(now, userID1, 1) + require.Error(t, err) + err = limiter.RequestGetChunkBandwidth(now, userID2, 1) + require.Error(t, err) +} diff --git a/relay/limiter/config.go b/relay/limiter/config.go new file mode 100644 index 0000000000..5f19d9362a --- /dev/null +++ b/relay/limiter/config.go @@ -0,0 +1,65 @@ +package limiter + +// Config is the configuration for the relay rate limiting. +type Config struct { + + // Blob rate limiting + + // MaxGetBlobOpsPerSecond is the maximum permitted number of GetBlob operations per second. Default is + // 1024. 
+	MaxGetBlobOpsPerSecond float64
+	// The burstiness of the MaxGetBlobOpsPerSecond rate limiter. This is the maximum burst size that can happen within
+	// a short time window. Default is 1024.
+	GetBlobOpsBurstiness int
+
+	// MaxGetBlobBytesPerSecond is the maximum bandwidth, in bytes, that GetBlob operations are permitted
+	// to consume per second. Default is 20MiB/s.
+	MaxGetBlobBytesPerSecond float64
+	// The burstiness of the MaxGetBlobBytesPerSecond rate limiter. This is the maximum burst size that can happen within
+	// a short time window. Default is 20MiB.
+	GetBlobBytesBurstiness int
+
+	// MaxConcurrentGetBlobOps is the maximum number of concurrent GetBlob operations that are permitted.
+	// This is in addition to the rate limits. Default is 1024.
+	MaxConcurrentGetBlobOps int
+
+	// Chunk rate limiting
+
+	// MaxGetChunkOpsPerSecond is the maximum permitted number of GetChunk operations per second. Default is
+	// 1024.
+	MaxGetChunkOpsPerSecond float64
+	// The burstiness of the MaxGetChunkOpsPerSecond rate limiter. This is the maximum burst size that can happen within
+	// a short time window. Default is 1024.
+	GetChunkOpsBurstiness int
+
+	// MaxGetChunkBytesPerSecond is the maximum bandwidth, in bytes, that GetChunk operations are permitted
+	// to consume per second. Default is 20MiB/s.
+	MaxGetChunkBytesPerSecond float64
+	// The burstiness of the MaxGetChunkBytesPerSecond rate limiter. This is the maximum burst size that can happen within
+	// a short time window. Default is 20MiB.
+	GetChunkBytesBurstiness int
+
+	// MaxConcurrentGetChunkOps is the maximum number of concurrent GetChunk operations that are permitted.
+	// Default is 1024.
+	MaxConcurrentGetChunkOps int
+
+	// Client rate limiting for GetChunk operations
+
+	// MaxGetChunkOpsPerSecondClient is the maximum permitted number of GetChunk operations per second for a single
+	// client. Default is 8.
+	MaxGetChunkOpsPerSecondClient float64
+	// The burstiness of the MaxGetChunkOpsPerSecondClient rate limiter. This is the maximum burst size that can happen
+	// within a short time window. Default is 8.
+	GetChunkOpsBurstinessClient int
+
+	// MaxGetChunkBytesPerSecondClient is the maximum bandwidth, in bytes, that GetChunk operations are permitted
+	// to consume per second. Default is 2MiB/s.
+	MaxGetChunkBytesPerSecondClient float64
+	// The burstiness of the MaxGetChunkBytesPerSecondClient rate limiter. This is the maximum burst size that can happen
+	// within a short time window. Default is 2MiB.
+	GetChunkBytesBurstinessClient int
+
+	// MaxConcurrentGetChunkOpsClient is the maximum number of concurrent GetChunk operations that are permitted.
+	// Default is 1.
+	MaxConcurrentGetChunkOpsClient int
+}
diff --git a/relay/limiter/limiter_test.go b/relay/limiter/limiter_test.go
new file mode 100644
index 0000000000..6064220f40
--- /dev/null
+++ b/relay/limiter/limiter_test.go
@@ -0,0 +1,86 @@
+package limiter
+
+import (
+	"github.com/stretchr/testify/require"
+	"golang.org/x/time/rate"
+	"testing"
+	"time"
+)
+
+// The rate.Limiter library has less documentation than ideal. Although I can figure out what it's doing by reading
+// the code, I think it's risky writing things that depend on what may change in the future. In these tests, I verify
+// some basic properties of the rate.Limiter library, so that if these properties ever change in the future, the tests
+// will fail and we'll know to update the code.
+ +func TestPositiveTokens(t *testing.T) { + configuredRate := rate.Limit(10.0) + // "burst" is equivalent to the bucket size, aka the number of tokens that can be stored + configuredBurst := 10 + + // time starts at current time, but advances manually afterward + now := time.Now() + + rateLimiter := rate.NewLimiter(configuredRate, configuredBurst) + + // number of tokens should equal the burst limit + require.Equal(t, configuredBurst, int(rateLimiter.TokensAt(now))) + + // moving forward in time should not change the number of tokens + now = now.Add(time.Second) + require.Equal(t, configuredBurst, int(rateLimiter.TokensAt(now))) + + // remove each token without advancing time + for i := 0; i < configuredBurst; i++ { + require.True(t, rateLimiter.AllowN(now, 1)) + require.Equal(t, configuredBurst-i-1, int(rateLimiter.TokensAt(now))) + } + require.Equal(t, 0, int(rateLimiter.TokensAt(now))) + + // removing an additional token should fail + require.False(t, rateLimiter.AllowN(now, 1)) + require.Equal(t, 0, int(rateLimiter.TokensAt(now))) + + // tokens should return at a rate of once per 100ms + for i := 0; i < configuredBurst; i++ { + now = now.Add(100 * time.Millisecond) + require.Equal(t, i+1, int(rateLimiter.TokensAt(now))) + } + require.Equal(t, configuredBurst, int(rateLimiter.TokensAt(now))) + + // remove 7 tokens all at once + require.True(t, rateLimiter.AllowN(now, 7)) + require.Equal(t, 3, int(rateLimiter.TokensAt(now))) + + // move forward 500ms, returning 5 tokens + now = now.Add(500 * time.Millisecond) + require.Equal(t, 8, int(rateLimiter.TokensAt(now))) + + // try to take more than the burst limit + require.False(t, rateLimiter.AllowN(now, 100)) +} + +func TestNegativeTokens(t *testing.T) { + configuredRate := rate.Limit(10.0) + // "burst" is equivalent to the bucket size, aka the number of tokens that can be stored + configuredBurst := 10 + + // time starts at current time, but advances manually afterward + now := time.Now() + + rateLimiter := 
rate.NewLimiter(configuredRate, configuredBurst) + + // number of tokens should equal the burst limit + require.Equal(t, configuredBurst, int(rateLimiter.TokensAt(now))) + + // remove all tokens then add them back + require.True(t, rateLimiter.AllowN(now, configuredBurst)) + require.Equal(t, 0, int(rateLimiter.TokensAt(now))) + for i := 0; i < configuredBurst; i++ { + require.True(t, rateLimiter.AllowN(now, -1)) + require.Equal(t, i+1, int(rateLimiter.TokensAt(now))) + } + + // nothing funky should happen when time advances + now = now.Add(100 * time.Second) + require.Equal(t, configuredBurst, int(rateLimiter.TokensAt(now))) +} diff --git a/relay/metadata_provider.go b/relay/metadata_provider.go index f5b583d59f..3e32924072 100644 --- a/relay/metadata_provider.go +++ b/relay/metadata_provider.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/Layr-Labs/eigenda/core/v2" "github.com/Layr-Labs/eigenda/disperser/common/v2/blobstore" + "github.com/Layr-Labs/eigenda/encoding" "github.com/Layr-Labs/eigenda/relay/cache" "github.com/Layr-Labs/eigensdk-go/logging" "sync/atomic" @@ -15,6 +16,8 @@ import ( type blobMetadata struct { // the size of the blob in bytes blobSizeBytes uint32 + // the size of each encoded chunk + chunkSizeBytes uint32 // the size of the file containing the encoded chunks totalChunkSizeBytes uint32 // the fragment size used for uploading the encoded chunks @@ -153,8 +156,17 @@ func (m *metadataProvider) fetchMetadata(key v2.BlobKey) (*blobMetadata, error) } } + // TODO(cody-littley): blob size is not correct https://github.com/Layr-Labs/eigenda/pull/906#discussion_r1847396530 + blobSize := uint32(cert.BlobHeader.BlobCommitments.Length) + chunkSize, err := v2.GetChunkLength(cert.BlobHeader.BlobVersion, blobSize) + chunkSize *= encoding.BYTES_PER_SYMBOL + if err != nil { + return nil, fmt.Errorf("error getting chunk length: %w", err) + } + metadata := &blobMetadata{ - blobSizeBytes: 0, /* Future work: populate this once it is added to the metadata store */ + 
blobSizeBytes: blobSize, + chunkSizeBytes: chunkSize, totalChunkSizeBytes: fragmentInfo.TotalChunkSizeBytes, fragmentSizeBytes: fragmentInfo.FragmentSizeBytes, } diff --git a/relay/relay_test_utils.go b/relay/relay_test_utils.go index 0f5fdf1cf9..f850b65cc7 100644 --- a/relay/relay_test_utils.go +++ b/relay/relay_test_utils.go @@ -178,7 +178,7 @@ func buildChunkStore(t *testing.T, logger logging.Logger) (chunkstore.ChunkReade func randomBlob(t *testing.T) (*v2.BlobHeader, []byte) { - data := tu.RandomBytes(128) + data := tu.RandomBytes(225) // TODO talk to Ian about this data = codec.ConvertByPaddingEmptyByte(data) commitments, err := prover.GetCommitments(data) diff --git a/relay/server.go b/relay/server.go index c599c3e335..ad6072b9fe 100644 --- a/relay/server.go +++ b/relay/server.go @@ -4,8 +4,6 @@ import ( "context" "errors" "fmt" - "net" - pb "github.com/Layr-Labs/eigenda/api/grpc/relay" "github.com/Layr-Labs/eigenda/common/healthcheck" "github.com/Layr-Labs/eigenda/core" @@ -13,9 +11,12 @@ import ( "github.com/Layr-Labs/eigenda/disperser/common/v2/blobstore" "github.com/Layr-Labs/eigenda/encoding" "github.com/Layr-Labs/eigenda/relay/chunkstore" + "github.com/Layr-Labs/eigenda/relay/limiter" "github.com/Layr-Labs/eigensdk-go/logging" "google.golang.org/grpc" "google.golang.org/grpc/reflection" + "net" + "time" ) var _ pb.RelayServer = &Server{} @@ -24,11 +25,12 @@ var _ pb.RelayServer = &Server{} type Server struct { pb.UnimplementedRelayServer + // config is the configuration for the relay Server. + config *Config + // the logger for the server logger logging.Logger - config *Config - // metadataProvider encapsulates logic for fetching metadata for blobs. metadataProvider *metadataProvider @@ -38,32 +40,53 @@ type Server struct { // chunkProvider encapsulates logic for fetching chunks. chunkProvider *chunkProvider + // blobRateLimiter enforces rate limits on GetBlob and operations. 
+ blobRateLimiter *limiter.BlobRateLimiter + + // chunkRateLimiter enforces rate limits on GetChunk operations. + chunkRateLimiter *limiter.ChunkRateLimiter + // grpcServer is the gRPC server. grpcServer *grpc.Server } type Config struct { + + // RelayIDs contains the IDs of the relays that this server is willing to serve data for. If empty, the server will + // serve data for any shard it can. + RelayIDs []v2.RelayKey + // GRPCPort is the port that the relay server listens on. GRPCPort int + // MaxGRPCMessageSize is the maximum size of a gRPC message that the server will accept. MaxGRPCMessageSize int + // MetadataCacheSize is the maximum number of items in the metadata cache. MetadataCacheSize int + // MetadataMaxConcurrency puts a limit on the maximum number of concurrent metadata fetches actively running on // goroutines. MetadataMaxConcurrency int + // BlobCacheSize is the maximum number of items in the blob cache. BlobCacheSize int + // BlobMaxConcurrency puts a limit on the maximum number of concurrent blob fetches actively running on goroutines. BlobMaxConcurrency int + // ChunkCacheSize is the maximum number of items in the chunk cache. ChunkCacheSize int + // ChunkMaxConcurrency is the size of the work pool for fetching chunks. Note that this does not // impact concurrency utilized by the s3 client to upload/download fragmented files. ChunkMaxConcurrency int - // RelayIDs contains the IDs of the relays that this server is willing to serve data for. If empty, the server will - // serve data for any shard it can. - RelayIDs []v2.RelayKey + + // MaxKeysPerGetChunksRequest is the maximum number of keys that can be requested in a single GetChunks request. + MaxKeysPerGetChunksRequest int + + // RateLimits contains configuration for rate limiting. + RateLimits limiter.Config } // NewServer creates a new relay Server. 
@@ -107,22 +130,28 @@ func NewServer( } return &Server{ - logger: logger, config: config, + logger: logger, metadataProvider: mp, blobProvider: bp, chunkProvider: cp, + blobRateLimiter: limiter.NewBlobRateLimiter(&config.RateLimits), + chunkRateLimiter: limiter.NewChunkRateLimiter(&config.RateLimits), }, nil } // GetBlob retrieves a blob stored by the relay. func (s *Server) GetBlob(ctx context.Context, request *pb.GetBlobRequest) (*pb.GetBlobReply, error) { - // Future work : - // - global throttle - // - per-connection throttle + // TODO(cody-littley): // - timeouts + err := s.blobRateLimiter.BeginGetBlobOperation(time.Now()) + if err != nil { + return nil, err + } + defer s.blobRateLimiter.FinishGetBlobOperation() + key, err := v2.BytesToBlobKey(request.BlobKey) if err != nil { return nil, fmt.Errorf("invalid blob key: %w", err) @@ -139,6 +168,11 @@ func (s *Server) GetBlob(ctx context.Context, request *pb.GetBlobRequest) (*pb.G return nil, fmt.Errorf("blob not found") } + err = s.blobRateLimiter.RequestGetBlobBandwidth(time.Now(), metadata.blobSizeBytes) + if err != nil { + return nil, err + } + data, err := s.blobProvider.GetBlob(key) if err != nil { return nil, fmt.Errorf("error fetching blob %s: %w", key.Hex(), err) @@ -154,16 +188,63 @@ func (s *Server) GetBlob(ctx context.Context, request *pb.GetBlobRequest) (*pb.G // GetChunks retrieves chunks from blobs stored by the relay. 
func (s *Server) GetChunks(ctx context.Context, request *pb.GetChunksRequest) (*pb.GetChunksReply, error) { - // Future work: + // TODO(cody-littley): // - authentication - // - global throttle - // - per-connection throttle // - timeouts if len(request.ChunkRequests) <= 0 { return nil, fmt.Errorf("no chunk requests provided") } + if len(request.ChunkRequests) > s.config.MaxKeysPerGetChunksRequest { + return nil, fmt.Errorf( + "too many chunk requests provided, max is %d", s.config.MaxKeysPerGetChunksRequest) + } + + // Future work: client IDs will be fixed when authentication is implemented + clientID := fmt.Sprintf("%d", request.RequesterId) + err := s.chunkRateLimiter.BeginGetChunkOperation(time.Now(), clientID) + if err != nil { + return nil, err + } + defer s.chunkRateLimiter.FinishGetChunkOperation(clientID) + + keys, err := getKeysFromChunkRequest(request) + if err != nil { + return nil, err + } + + mMap, err := s.metadataProvider.GetMetadataForBlobs(keys) + if err != nil { + return nil, fmt.Errorf( + "error fetching metadata for blob, check if blob exists and is assigned to this relay: %w", err) + } + + requiredBandwidth, err := computeChunkRequestRequiredBandwidth(request, mMap) + if err != nil { + return nil, fmt.Errorf("error computing required bandwidth: %w", err) + } + err = s.chunkRateLimiter.RequestGetChunkBandwidth(time.Now(), clientID, requiredBandwidth) + if err != nil { + return nil, err + } + + frames, err := s.chunkProvider.GetFrames(ctx, mMap) + if err != nil { + return nil, fmt.Errorf("error fetching frames: %w", err) + } + bytesToSend, err := gatherChunkDataToSend(frames, request) + if err != nil { + return nil, fmt.Errorf("error gathering chunk data: %w", err) + } + + return &pb.GetChunksReply{ + Data: bytesToSend, + }, nil +} + +// getKeysFromChunkRequest gathers a slice of blob keys from a GetChunks request. 
+func getKeysFromChunkRequest(request *pb.GetChunksRequest) ([]v2.BlobKey, error) { keys := make([]v2.BlobKey, 0, len(request.ChunkRequests)) for _, chunkRequest := range request.ChunkRequests { @@ -184,20 +265,16 @@ func (s *Server) GetChunks(ctx context.Context, request *pb.GetChunksRequest) (* keys = append(keys, key) } - mMap, err := s.metadataProvider.GetMetadataForBlobs(keys) - if err != nil { - return nil, fmt.Errorf( - "error fetching metadata for blob, check if blob exists and is assigned to this relay: %w", err) - } + return keys, nil +} - frames, err := s.chunkProvider.GetFrames(ctx, mMap) - if err != nil { - return nil, fmt.Errorf("error fetching frames: %w", err) - } +// gatherChunkDataToSend takes the chunk data and narrows it down to the data requested in the GetChunks request. +func gatherChunkDataToSend( + frames map[v2.BlobKey][]*encoding.Frame, + request *pb.GetChunksRequest) ([][]byte, error) { - bytesToSend := make([][]byte, 0, len(keys)) + bytesToSend := make([][]byte, 0, len(frames)) - // return data in the order that it was requested for _, chunkRequest := range request.ChunkRequests { framesToSend := make([]*encoding.Frame, 0) @@ -246,14 +323,40 @@ func (s *Server) GetChunks(ctx context.Context, request *pb.GetChunksRequest) (* bytesToSend = append(bytesToSend, bundleBytes) } - return &pb.GetChunksReply{ - Data: bytesToSend, - }, nil + return bytesToSend, nil +} + +// computeChunkRequestRequiredBandwidth computes the bandwidth required to fulfill a GetChunks request. 
+func computeChunkRequestRequiredBandwidth(request *pb.GetChunksRequest, mMap metadataMap) (int, error) { + requiredBandwidth := 0 + for _, req := range request.ChunkRequests { + var metadata *blobMetadata + var key v2.BlobKey + var requestedChunks int + + if req.GetByIndex() != nil { + key = v2.BlobKey(req.GetByIndex().GetBlobKey()) + metadata = mMap[key] + requestedChunks = len(req.GetByIndex().ChunkIndices) + } else { + key = v2.BlobKey(req.GetByRange().GetBlobKey()) + metadata = mMap[key] + requestedChunks = int(req.GetByRange().EndIndex - req.GetByRange().StartIndex) + } + + if metadata == nil { + return 0, fmt.Errorf("metadata not found for key %s", key.Hex()) + } + + requiredBandwidth += requestedChunks * int(metadata.chunkSizeBytes) + } + + return requiredBandwidth, nil + } // Start starts the server listening for requests. This method will block until the server is stopped. func (s *Server) Start() error { - // Serve grpc requests addr := fmt.Sprintf("0.0.0.0:%d", s.config.GRPCPort) listener, err := net.Listen("tcp", addr) diff --git a/relay/server_test.go b/relay/server_test.go index d480349067..cedfa6ddb4 100644 --- a/relay/server_test.go +++ b/relay/server_test.go @@ -2,6 +2,7 @@ package relay import ( "context" + "github.com/Layr-Labs/eigenda/relay/limiter" "math/rand" "testing" @@ -18,14 +19,32 @@ import ( func defaultConfig() *Config { return &Config{ - GRPCPort: 50051, - MaxGRPCMessageSize: 1024 * 1024 * 300, - MetadataCacheSize: 1024 * 1024, - MetadataMaxConcurrency: 32, - BlobCacheSize: 32, - BlobMaxConcurrency: 32, - ChunkCacheSize: 32, - ChunkMaxConcurrency: 32, + GRPCPort: 50051, + MaxGRPCMessageSize: 1024 * 1024 * 300, + MetadataCacheSize: 1024 * 1024, + MetadataMaxConcurrency: 32, + BlobCacheSize: 32, + BlobMaxConcurrency: 32, + ChunkCacheSize: 32, + ChunkMaxConcurrency: 32, + MaxKeysPerGetChunksRequest: 1024, + RateLimits: limiter.Config{ + MaxGetBlobOpsPerSecond: 1024, + GetBlobOpsBurstiness: 1024, + MaxGetBlobBytesPerSecond: 20 * 1024 * 
1024, + GetBlobBytesBurstiness: 20 * 1024 * 1024, + MaxConcurrentGetBlobOps: 1024, + MaxGetChunkOpsPerSecond: 1024, + GetChunkOpsBurstiness: 1024, + MaxGetChunkBytesPerSecond: 20 * 1024 * 1024, + GetChunkBytesBurstiness: 20 * 1024 * 1024, + MaxConcurrentGetChunkOps: 1024, + MaxGetChunkOpsPerSecondClient: 8, + GetChunkOpsBurstinessClient: 8, + MaxGetChunkBytesPerSecondClient: 2 * 1024 * 1024, + GetChunkBytesBurstinessClient: 2 * 1024 * 1024, + MaxConcurrentGetChunkOpsClient: 1, + }, } } @@ -318,6 +337,10 @@ func TestReadWriteChunks(t *testing.T) { // This is the server used to read it back config := defaultConfig() + config.RateLimits.MaxGetChunkOpsPerSecond = 1000 + config.RateLimits.GetChunkOpsBurstiness = 1000 + config.RateLimits.MaxGetChunkOpsPerSecondClient = 1000 + config.RateLimits.GetChunkOpsBurstinessClient = 1000 server, err := NewServer( context.Background(), logger, @@ -634,6 +657,10 @@ func TestReadWriteChunksWithSharding(t *testing.T) { // This is the server used to read it back config := defaultConfig() config.RelayIDs = shardList + config.RateLimits.MaxGetChunkOpsPerSecond = 1000 + config.RateLimits.GetChunkOpsBurstiness = 1000 + config.RateLimits.MaxGetChunkOpsPerSecondClient = 1000 + config.RateLimits.GetChunkOpsBurstinessClient = 1000 server, err := NewServer( context.Background(), logger, @@ -904,6 +931,10 @@ func TestBatchedReadWriteChunksWithSharding(t *testing.T) { // This is the server used to read it back config := defaultConfig() config.RelayIDs = shardList + config.RateLimits.MaxGetChunkOpsPerSecond = 1000 + config.RateLimits.GetChunkOpsBurstiness = 1000 + config.RateLimits.MaxGetChunkOpsPerSecondClient = 1000 + config.RateLimits.GetChunkOpsBurstinessClient = 1000 server, err := NewServer( context.Background(), logger, From cf32712185dd4baba9927dcc3ca287ce0d1cbdfe Mon Sep 17 00:00:00 2001 From: Jian Xiao <99709935+jianoaix@users.noreply.github.com> Date: Tue, 19 Nov 2024 15:41:51 -0800 Subject: [PATCH 2/8] Add encoder queueing stats for 
autoscaling (#910) --- disperser/batcher/metrics.go | 34 ++--------------------------- disperser/common/utils.go | 42 ++++++++++++++++++++++++++++++++++++ disperser/encoder/metrics.go | 16 ++++++++++++++ disperser/encoder/server.go | 33 ++++++++++++++++++++++------ 4 files changed, 86 insertions(+), 39 deletions(-) create mode 100644 disperser/common/utils.go diff --git a/disperser/batcher/metrics.go b/disperser/batcher/metrics.go index 8ccabcb004..b0762a1f96 100644 --- a/disperser/batcher/metrics.go +++ b/disperser/batcher/metrics.go @@ -7,6 +7,7 @@ import ( "github.com/Layr-Labs/eigenda/core" "github.com/Layr-Labs/eigenda/disperser" + "github.com/Layr-Labs/eigenda/disperser/common" "github.com/Layr-Labs/eigensdk-go/logging" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/collectors" @@ -370,7 +371,7 @@ func (e *EncodingStreamerMetrics) UpdateEncodedBlobs(count int, size uint64) { } func (e *EncodingStreamerMetrics) ObserveEncodingLatency(state string, quorumId core.QuorumID, blobSize int, latencyMs float64) { - e.BlobEncodingLatency.WithLabelValues(state, fmt.Sprintf("%d", quorumId), blobSizeBucket(blobSize)).Observe(latencyMs) + e.BlobEncodingLatency.WithLabelValues(state, fmt.Sprintf("%d", quorumId), common.BlobSizeBucket(blobSize)).Observe(latencyMs) } func (t *TxnManagerMetrics) ObserveLatency(stage string, latencyMs float64) { @@ -408,34 +409,3 @@ func (f *FinalizerMetrics) UpdateLastSeenFinalizedBlock(blockNumber uint64) { func (f *FinalizerMetrics) ObserveLatency(stage string, latencyMs float64) { f.Latency.WithLabelValues(stage).Observe(latencyMs) } - -// blobSizeBucket maps the blob size into a bucket that's defined according to -// the power of 2. 
-func blobSizeBucket(blobSize int) string { - switch { - case blobSize <= 32*1024: - return "32KiB" - case blobSize <= 64*1024: - return "64KiB" - case blobSize <= 128*1024: - return "128KiB" - case blobSize <= 256*1024: - return "256KiB" - case blobSize <= 512*1024: - return "512KiB" - case blobSize <= 1024*1024: - return "1MiB" - case blobSize <= 2*1024*1024: - return "2MiB" - case blobSize <= 4*1024*1024: - return "4MiB" - case blobSize <= 8*1024*1024: - return "8MiB" - case blobSize <= 16*1024*1024: - return "16MiB" - case blobSize <= 32*1024*1024: - return "32MiB" - default: - return "invalid" - } -} diff --git a/disperser/common/utils.go b/disperser/common/utils.go new file mode 100644 index 0000000000..5d2d4ca799 --- /dev/null +++ b/disperser/common/utils.go @@ -0,0 +1,42 @@ +package common + +// BlobSizeBucket maps the blob size into a bucket that's defined according to +// the power of 2. +func BlobSizeBucket(blobSize int) string { + switch { + case blobSize <= 1*1024: + return "1KiB" + case blobSize <= 2*1024: + return "2KiB" + case blobSize <= 4*1024: + return "4KiB" + case blobSize <= 8*1024: + return "8KiB" + case blobSize <= 16*1024: + return "16KiB" + case blobSize <= 32*1024: + return "32KiB" + case blobSize <= 64*1024: + return "64KiB" + case blobSize <= 128*1024: + return "128KiB" + case blobSize <= 256*1024: + return "256KiB" + case blobSize <= 512*1024: + return "512KiB" + case blobSize <= 1024*1024: + return "1MiB" + case blobSize <= 2*1024*1024: + return "2MiB" + case blobSize <= 4*1024*1024: + return "4MiB" + case blobSize <= 8*1024*1024: + return "8MiB" + case blobSize <= 16*1024*1024: + return "16MiB" + case blobSize <= 32*1024*1024: + return "32MiB" + default: + return "invalid" + } +} diff --git a/disperser/encoder/metrics.go b/disperser/encoder/metrics.go index 008cd16573..a68071bb90 100644 --- a/disperser/encoder/metrics.go +++ b/disperser/encoder/metrics.go @@ -6,6 +6,7 @@ import ( "net/http" "time" + 
"github.com/Layr-Labs/eigenda/disperser/common" "github.com/Layr-Labs/eigensdk-go/logging" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/collectors" @@ -26,6 +27,7 @@ type Metrics struct { NumEncodeBlobRequests *prometheus.CounterVec BlobSizeTotal *prometheus.CounterVec Latency *prometheus.SummaryVec + BlobQueue *prometheus.GaugeVec } func NewMetrics(httpPort string, logger logging.Logger) *Metrics { @@ -62,6 +64,14 @@ func NewMetrics(httpPort string, logger logging.Logger) *Metrics { }, []string{"time"}, // time is either encoding or total ), + BlobQueue: promauto.With(reg).NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: "eigenda_encoder", + Name: "blob_queue", + Help: "the number of blobs in the queue for encoding", + }, + []string{"size_bucket"}, + ), } } @@ -97,6 +107,12 @@ func (m *Metrics) ObserveLatency(stage string, duration time.Duration) { m.Latency.WithLabelValues(stage).Observe(float64(duration.Milliseconds())) } +func (m *Metrics) ObserveQueue(queueStats map[int]int) { + for blobSize, num := range queueStats { + m.BlobQueue.With(prometheus.Labels{"size_bucket": common.BlobSizeBucket(blobSize)}).Set(float64(num)) + } +} + func (m *Metrics) Start(ctx context.Context) { m.logger.Info("Starting metrics server at ", "port", m.httpPort) diff --git a/disperser/encoder/server.go b/disperser/encoder/server.go index 5d6a54a8c0..b02dd358d7 100644 --- a/disperser/encoder/server.go +++ b/disperser/encoder/server.go @@ -6,6 +6,7 @@ import ( "fmt" "log" "net" + "sync" "time" "github.com/Layr-Labs/eigenda/common/healthcheck" @@ -27,7 +28,14 @@ type EncoderServer struct { close func() runningRequests chan struct{} - requestPool chan struct{} + requestPool chan blobRequest + + queueStats map[int]int + queueLock sync.Mutex +} + +type blobRequest struct { + blobSizeByte int } func NewEncoderServer(config ServerConfig, logger logging.Logger, prover encoding.Prover, metrics *Metrics) *EncoderServer { @@ -38,7 +46,8 @@ 
func NewEncoderServer(config ServerConfig, logger logging.Logger, prover encodin metrics: metrics, runningRequests: make(chan struct{}, config.MaxConcurrentRequests), - requestPool: make(chan struct{}, config.RequestPoolSize), + requestPool: make(chan blobRequest, config.RequestPoolSize), + queueStats: make(map[int]int), } } @@ -80,27 +89,33 @@ func (s *EncoderServer) Close() { func (s *EncoderServer) EncodeBlob(ctx context.Context, req *pb.EncodeBlobRequest) (*pb.EncodeBlobReply, error) { startTime := time.Now() + blobSize := len(req.GetData()) select { - case s.requestPool <- struct{}{}: + case s.requestPool <- blobRequest{blobSizeByte: blobSize}: default: s.metrics.IncrementRateLimitedBlobRequestNum(len(req.GetData())) s.logger.Warn("rate limiting as request pool is full", "requestPoolSize", s.config.RequestPoolSize, "maxConcurrentRequests", s.config.MaxConcurrentRequests) return nil, errors.New("too many requests") } + s.queueLock.Lock() + s.queueStats[blobSize]++ + s.metrics.ObserveQueue(s.queueStats) + s.queueLock.Unlock() + s.runningRequests <- struct{}{} defer s.popRequest() if ctx.Err() != nil { - s.metrics.IncrementCanceledBlobRequestNum(len(req.GetData())) + s.metrics.IncrementCanceledBlobRequestNum(blobSize) return nil, ctx.Err() } s.metrics.ObserveLatency("queuing", time.Since(startTime)) reply, err := s.handleEncoding(ctx, req) if err != nil { - s.metrics.IncrementFailedBlobRequestNum(len(req.GetData())) + s.metrics.IncrementFailedBlobRequestNum(blobSize) } else { - s.metrics.IncrementSuccessfulBlobRequestNum(len(req.GetData())) + s.metrics.IncrementSuccessfulBlobRequestNum(blobSize) } s.metrics.ObserveLatency("total", time.Since(startTime)) @@ -108,8 +123,12 @@ func (s *EncoderServer) EncodeBlob(ctx context.Context, req *pb.EncodeBlobReques } func (s *EncoderServer) popRequest() { - <-s.requestPool + blobRequest := <-s.requestPool <-s.runningRequests + s.queueLock.Lock() + s.queueStats[blobRequest.blobSizeByte]-- + 
s.metrics.ObserveQueue(s.queueStats) + s.queueLock.Unlock() } func (s *EncoderServer) handleEncoding(ctx context.Context, req *pb.EncodeBlobRequest) (*pb.EncodeBlobReply, error) { From d6b99b56ec5a51f2a19e5e592d66db0f83f1ffbc Mon Sep 17 00:00:00 2001 From: Jian Xiao <99709935+jianoaix@users.noreply.github.com> Date: Tue, 19 Nov 2024 16:31:30 -0800 Subject: [PATCH 3/8] Track bucket instead of sizes (#913) --- disperser/encoder/metrics.go | 7 +++---- disperser/encoder/server.go | 16 ++++++++-------- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/disperser/encoder/metrics.go b/disperser/encoder/metrics.go index a68071bb90..11ba438b54 100644 --- a/disperser/encoder/metrics.go +++ b/disperser/encoder/metrics.go @@ -6,7 +6,6 @@ import ( "net/http" "time" - "github.com/Layr-Labs/eigenda/disperser/common" "github.com/Layr-Labs/eigensdk-go/logging" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/collectors" @@ -107,9 +106,9 @@ func (m *Metrics) ObserveLatency(stage string, duration time.Duration) { m.Latency.WithLabelValues(stage).Observe(float64(duration.Milliseconds())) } -func (m *Metrics) ObserveQueue(queueStats map[int]int) { - for blobSize, num := range queueStats { - m.BlobQueue.With(prometheus.Labels{"size_bucket": common.BlobSizeBucket(blobSize)}).Set(float64(num)) +func (m *Metrics) ObserveQueue(queueStats map[string]int) { + for bucket, num := range queueStats { + m.BlobQueue.With(prometheus.Labels{"size_bucket": bucket}).Set(float64(num)) } } diff --git a/disperser/encoder/server.go b/disperser/encoder/server.go index b02dd358d7..18a7ad43ec 100644 --- a/disperser/encoder/server.go +++ b/disperser/encoder/server.go @@ -12,6 +12,7 @@ import ( "github.com/Layr-Labs/eigenda/common/healthcheck" "github.com/Layr-Labs/eigenda/disperser" pb "github.com/Layr-Labs/eigenda/disperser/api/grpc/encoder" + "github.com/Layr-Labs/eigenda/disperser/common" "github.com/Layr-Labs/eigenda/encoding" 
"github.com/Layr-Labs/eigensdk-go/logging" "google.golang.org/grpc" @@ -30,7 +31,7 @@ type EncoderServer struct { runningRequests chan struct{} requestPool chan blobRequest - queueStats map[int]int + queueStats map[string]int queueLock sync.Mutex } @@ -47,7 +48,7 @@ func NewEncoderServer(config ServerConfig, logger logging.Logger, prover encodin runningRequests: make(chan struct{}, config.MaxConcurrentRequests), requestPool: make(chan blobRequest, config.RequestPoolSize), - queueStats: make(map[int]int), + queueStats: make(map[string]int), } } @@ -92,16 +93,15 @@ func (s *EncoderServer) EncodeBlob(ctx context.Context, req *pb.EncodeBlobReques blobSize := len(req.GetData()) select { case s.requestPool <- blobRequest{blobSizeByte: blobSize}: + s.queueLock.Lock() + s.queueStats[common.BlobSizeBucket(blobSize)]++ + s.metrics.ObserveQueue(s.queueStats) + s.queueLock.Unlock() default: s.metrics.IncrementRateLimitedBlobRequestNum(len(req.GetData())) s.logger.Warn("rate limiting as request pool is full", "requestPoolSize", s.config.RequestPoolSize, "maxConcurrentRequests", s.config.MaxConcurrentRequests) return nil, errors.New("too many requests") } - s.queueLock.Lock() - s.queueStats[blobSize]++ - s.metrics.ObserveQueue(s.queueStats) - s.queueLock.Unlock() - s.runningRequests <- struct{}{} defer s.popRequest() @@ -126,7 +126,7 @@ func (s *EncoderServer) popRequest() { blobRequest := <-s.requestPool <-s.runningRequests s.queueLock.Lock() - s.queueStats[blobRequest.blobSizeByte]-- + s.queueStats[common.BlobSizeBucket(blobRequest.blobSizeByte)]-- s.metrics.ObserveQueue(s.queueStats) s.queueLock.Unlock() } From 88b1ac3732a083a049db94000820a7b6da7e1306 Mon Sep 17 00:00:00 2001 From: Ian Shim <100327837+ian-shim@users.noreply.github.com> Date: Tue, 19 Nov 2024 16:56:02 -0800 Subject: [PATCH 4/8] [v2] node/relay bug fixes (#908) --- api/clients/relay_client.go | 92 +++++++++++++++++++-------------- node/config.go | 3 ++ node/flags/flags.go | 7 +++ node/grpc/server_test.go | 1 + 
node/grpc/server_v2.go | 16 ++++++ node/grpc/server_v2_test.go | 78 +++++++++++++++------------- node/node.go | 23 ++++++++- node/store_v2.go | 7 +++ relay/metadata_provider.go | 17 ++++-- relay/metadata_provider_test.go | 14 +++-- relay/server.go | 8 +-- relay/server_test.go | 27 ++++++++-- 12 files changed, 202 insertions(+), 91 deletions(-) diff --git a/api/clients/relay_client.go b/api/clients/relay_client.go index 185f5b98e5..5ed6a0ead7 100644 --- a/api/clients/relay_client.go +++ b/api/clients/relay_client.go @@ -45,11 +45,15 @@ type RelayClient interface { type relayClient struct { config *RelayClientConfig - initOnce map[corev2.RelayKey]*sync.Once - conns map[corev2.RelayKey]*grpc.ClientConn - logger logging.Logger - - grpcClients map[corev2.RelayKey]relaygrpc.RelayClient + // initOnce is used to ensure that the connection to each relay is initialized only once. + // It maps relay key to a sync.Once instance: `map[corev2.RelayKey]*sync.Once` + initOnce *sync.Map + // conns maps relay key to the gRPC connection: `map[corev2.RelayKey]*grpc.ClientConn` + conns sync.Map + logger logging.Logger + + // grpcClients maps relay key to the gRPC client: `map[corev2.RelayKey]relaygrpc.RelayClient` + grpcClients sync.Map } var _ RelayClient = (*relayClient)(nil) @@ -57,37 +61,28 @@ var _ RelayClient = (*relayClient)(nil) // NewRelayClient creates a new RelayClient that connects to the relays specified in the config. // It keeps a connection to each relay and reuses it for subsequent requests, and the connection is lazily instantiated. 
func NewRelayClient(config *RelayClientConfig, logger logging.Logger) (*relayClient, error) { - if config == nil || len(config.Sockets) > 0 { + if config == nil || len(config.Sockets) <= 0 { return nil, fmt.Errorf("invalid config: %v", config) } - initOnce := make(map[corev2.RelayKey]*sync.Once) - conns := make(map[corev2.RelayKey]*grpc.ClientConn) - grpcClients := make(map[corev2.RelayKey]relaygrpc.RelayClient) + initOnce := sync.Map{} for key := range config.Sockets { - initOnce[key] = &sync.Once{} + initOnce.Store(key, &sync.Once{}) } return &relayClient{ config: config, - initOnce: initOnce, - conns: conns, + initOnce: &initOnce, logger: logger, - - grpcClients: grpcClients, }, nil } func (c *relayClient) GetBlob(ctx context.Context, relayKey corev2.RelayKey, blobKey corev2.BlobKey) ([]byte, error) { - if err := c.initOnceGrpcConnection(relayKey); err != nil { + client, err := c.getClient(relayKey) + if err != nil { return nil, err } - client, ok := c.grpcClients[relayKey] - if !ok { - return nil, fmt.Errorf("no grpc client for relay key: %v", relayKey) - } - res, err := client.GetBlob(ctx, &relaygrpc.GetBlobRequest{ BlobKey: blobKey[:], }) @@ -102,15 +97,11 @@ func (c *relayClient) GetChunksByRange(ctx context.Context, relayKey corev2.Rela if len(requests) == 0 { return nil, fmt.Errorf("no requests") } - if err := c.initOnceGrpcConnection(relayKey); err != nil { + client, err := c.getClient(relayKey) + if err != nil { return nil, err } - client, ok := c.grpcClients[relayKey] - if !ok { - return nil, fmt.Errorf("no grpc client for relay key: %v", relayKey) - } - grpcRequests := make([]*relaygrpc.ChunkRequest, len(requests)) for i, req := range requests { grpcRequests[i] = &relaygrpc.ChunkRequest{ @@ -138,13 +129,10 @@ func (c *relayClient) GetChunksByIndex(ctx context.Context, relayKey corev2.Rela if len(requests) == 0 { return nil, fmt.Errorf("no requests") } - if err := c.initOnceGrpcConnection(relayKey); err != nil { - return nil, err - } - client, ok := 
c.grpcClients[relayKey] - if !ok { - return nil, fmt.Errorf("no grpc client for relay key: %v", relayKey) + client, err := c.getClient(relayKey) + if err != nil { + return nil, err } grpcRequests := make([]*relaygrpc.ChunkRequest, len(requests)) @@ -169,9 +157,28 @@ func (c *relayClient) GetChunksByIndex(ctx context.Context, relayKey corev2.Rela return res.GetData(), nil } +func (c *relayClient) getClient(key corev2.RelayKey) (relaygrpc.RelayClient, error) { + if err := c.initOnceGrpcConnection(key); err != nil { + return nil, err + } + maybeClient, ok := c.grpcClients.Load(key) + if !ok { + return nil, fmt.Errorf("no grpc client for relay key: %v", key) + } + client, ok := maybeClient.(relaygrpc.RelayClient) + if !ok { + return nil, fmt.Errorf("invalid grpc client for relay key: %v", key) + } + return client, nil +} + func (c *relayClient) initOnceGrpcConnection(key corev2.RelayKey) error { var initErr error - c.initOnce[key].Do(func() { + once, ok := c.initOnce.Load(key) + if !ok { + return fmt.Errorf("unknown relay key: %v", key) + } + once.(*sync.Once).Do(func() { socket, ok := c.config.Sockets[key] if !ok { initErr = fmt.Errorf("unknown relay key: %v", key) @@ -183,24 +190,31 @@ func (c *relayClient) initOnceGrpcConnection(key corev2.RelayKey) error { initErr = err return } - c.conns[key] = conn - c.grpcClients[key] = relaygrpc.NewRelayClient(conn) + c.conns.Store(key, conn) + c.grpcClients.Store(key, relaygrpc.NewRelayClient(conn)) }) return initErr } func (c *relayClient) Close() error { var errList *multierror.Error - for k, conn := range c.conns { + c.conns.Range(func(k, v interface{}) bool { + conn, ok := v.(*grpc.ClientConn) + if !ok { + errList = multierror.Append(errList, fmt.Errorf("invalid connection for relay key: %v", k)) + return true + } + if conn != nil { err := conn.Close() - conn = nil - c.grpcClients[k] = nil + c.conns.Delete(k) + c.grpcClients.Delete(k) if err != nil { c.logger.Error("failed to close connection", "err", err) errList = 
multierror.Append(errList, err) } } - } + return true + }) return errList.ErrorOrNil() } diff --git a/node/config.go b/node/config.go index 3b83885798..e67fc7894f 100644 --- a/node/config.go +++ b/node/config.go @@ -88,6 +88,8 @@ type Config struct { EthClientConfig geth.EthClientConfig LoggerConfig common.LoggerConfig EncoderConfig kzg.KzgConfig + + EnableV2 bool } // NewConfig parses the Config from the provided flags or environment variables and @@ -232,5 +234,6 @@ func NewConfig(ctx *cli.Context) (*Config, error) { BLSKeyPassword: ctx.GlobalString(flags.BlsKeyPasswordFlag.Name), BLSSignerTLSCertFilePath: ctx.GlobalString(flags.BLSSignerCertFileFlag.Name), BLSRemoteSignerEnabled: blsRemoteSignerEnabled, + EnableV2: ctx.GlobalBool(flags.EnableV2Flag.Name), }, nil } diff --git a/node/flags/flags.go b/node/flags/flags.go index a1829d7acf..5bcd95a98b 100644 --- a/node/flags/flags.go +++ b/node/flags/flags.go @@ -218,6 +218,12 @@ var ( Required: false, EnvVar: common.PrefixEnvVar(EnvVarPrefix, "ENABLE_GNARK_BUNDLE_ENCODING"), } + EnableV2Flag = cli.BoolFlag{ + Name: "enable-v2", + Usage: "Enable V2 features", + Required: false, + EnvVar: common.PrefixEnvVar(EnvVarPrefix, "ENABLE_V2"), + } // Test only, DO NOT USE the following flags in production @@ -346,6 +352,7 @@ var optionalFlags = []cli.Flag{ BLSRemoteSignerUrlFlag, BLSPublicKeyHexFlag, BLSSignerCertFileFlag, + EnableV2Flag, } func init() { diff --git a/node/grpc/server_test.go b/node/grpc/server_test.go index 27819f59e0..8953f1f1cd 100644 --- a/node/grpc/server_test.go +++ b/node/grpc/server_test.go @@ -83,6 +83,7 @@ func makeConfig(t *testing.T) *node.Config { DbPath: t.TempDir(), ID: opID, NumBatchValidators: runtime.GOMAXPROCS(0), + EnableV2: false, } } diff --git a/node/grpc/server_v2.go b/node/grpc/server_v2.go index 9fc53178f3..4f46a70d53 100644 --- a/node/grpc/server_v2.go +++ b/node/grpc/server_v2.go @@ -58,6 +58,13 @@ func (s *ServerV2) NodeInfo(ctx context.Context, in *pb.NodeInfoRequest) (*pb.No } 
func (s *ServerV2) StoreChunks(ctx context.Context, in *pb.StoreChunksRequest) (*pb.StoreChunksReply, error) { + if !s.config.EnableV2 { + return nil, api.NewErrorInvalidArg("v2 API is disabled") + } + + if s.node.StoreV2 == nil { + return nil, api.NewErrorInternal("v2 store not initialized") + } batch, err := s.validateStoreChunksRequest(in) if err != nil { return nil, err @@ -68,6 +75,7 @@ func (s *ServerV2) StoreChunks(ctx context.Context, in *pb.StoreChunksRequest) ( return nil, api.NewErrorInternal(fmt.Sprintf("invalid batch header: %v", err)) } + s.logger.Info("new StoreChunks request", "batchHeaderHash", hex.EncodeToString(batchHeaderHash[:]), "numBlobs", len(batch.BlobCertificates), "referenceBlockNumber", batch.BatchHeader.ReferenceBlockNumber) operatorState, err := s.node.ChainState.GetOperatorStateByOperator(ctx, uint(batch.BatchHeader.ReferenceBlockNumber), s.node.Config.ID) if err != nil { return nil, err @@ -136,6 +144,14 @@ func (s *ServerV2) validateStoreChunksRequest(req *pb.StoreChunksRequest) (*core } func (s *ServerV2) GetChunks(ctx context.Context, in *pb.GetChunksRequest) (*pb.GetChunksReply, error) { + if !s.config.EnableV2 { + return nil, api.NewErrorInvalidArg("v2 API is disabled") + } + + if s.node.StoreV2 == nil { + return nil, api.NewErrorInternal("v2 store not initialized") + } + blobKey, err := corev2.BytesToBlobKey(in.GetBlobKey()) if err != nil { return nil, api.NewErrorInvalidArg(fmt.Sprintf("invalid blob key: %v", err)) diff --git a/node/grpc/server_v2_test.go b/node/grpc/server_v2_test.go index b83d36e72d..6bb15870c6 100644 --- a/node/grpc/server_v2_test.go +++ b/node/grpc/server_v2_test.go @@ -84,8 +84,21 @@ func TestV2NodeInfoRequest(t *testing.T) { assert.True(t, err == nil) } +func TestV2ServerWithoutV2(t *testing.T) { + config := makeConfig(t) + config.EnableV2 = false + c := newTestComponents(t, config) + _, err := c.server.StoreChunks(context.Background(), &pbv2.StoreChunksRequest{}) + requireErrorStatus(t, err, 
codes.InvalidArgument) + + _, err = c.server.GetChunks(context.Background(), &pbv2.GetChunksRequest{}) + requireErrorStatus(t, err, codes.InvalidArgument) +} + func TestV2StoreChunksInputValidation(t *testing.T) { - c := newTestComponents(t, makeConfig(t)) + config := makeConfig(t) + config.EnableV2 = true + c := newTestComponents(t, config) _, batch, _ := nodemock.MockBatch(t) batchProto, err := batch.ToProtobuf() require.NoError(t, err) @@ -94,10 +107,7 @@ func TestV2StoreChunksInputValidation(t *testing.T) { Batch: &pbcommon.Batch{}, } _, err = c.server.StoreChunks(context.Background(), req) - require.Error(t, err) - s, ok := status.FromError(err) - require.True(t, ok) - assert.Equal(t, s.Code(), codes.InvalidArgument) + requireErrorStatus(t, err, codes.InvalidArgument) req = &pbv2.StoreChunksRequest{ Batch: &pbcommon.Batch{ @@ -106,10 +116,7 @@ func TestV2StoreChunksInputValidation(t *testing.T) { }, } _, err = c.server.StoreChunks(context.Background(), req) - require.Error(t, err) - s, ok = status.FromError(err) - require.True(t, ok) - assert.Equal(t, s.Code(), codes.InvalidArgument) + requireErrorStatus(t, err, codes.InvalidArgument) req = &pbv2.StoreChunksRequest{ Batch: &pbcommon.Batch{ @@ -118,14 +125,13 @@ func TestV2StoreChunksInputValidation(t *testing.T) { }, } _, err = c.server.StoreChunks(context.Background(), req) - require.Error(t, err) - s, ok = status.FromError(err) - require.True(t, ok) - assert.Equal(t, s.Code(), codes.InvalidArgument) + requireErrorStatus(t, err, codes.InvalidArgument) } func TestV2StoreChunksSuccess(t *testing.T) { - c := newTestComponents(t, makeConfig(t)) + config := makeConfig(t) + config.EnableV2 = true + c := newTestComponents(t, config) blobKeys, batch, bundles := nodemock.MockBatch(t) batchProto, err := batch.ToProtobuf() @@ -176,7 +182,9 @@ func TestV2StoreChunksSuccess(t *testing.T) { } func TestV2StoreChunksDownloadFailure(t *testing.T) { - c := newTestComponents(t, makeConfig(t)) + config := makeConfig(t) + 
config.EnableV2 = true + c := newTestComponents(t, config) _, batch, _ := nodemock.MockBatch(t) batchProto, err := batch.ToProtobuf() @@ -191,14 +199,13 @@ func TestV2StoreChunksDownloadFailure(t *testing.T) { Batch: batchProto, }) require.Nil(t, reply.GetSignature()) - require.Error(t, err) - s, ok := status.FromError(err) - require.True(t, ok) - assert.Equal(t, s.Code(), codes.Internal) + requireErrorStatus(t, err, codes.Internal) } func TestV2StoreChunksStorageFailure(t *testing.T) { - c := newTestComponents(t, makeConfig(t)) + config := makeConfig(t) + config.EnableV2 = true + c := newTestComponents(t, config) blobKeys, batch, bundles := nodemock.MockBatch(t) batchProto, err := batch.ToProtobuf() @@ -238,14 +245,13 @@ func TestV2StoreChunksStorageFailure(t *testing.T) { Batch: batchProto, }) require.Nil(t, reply.GetSignature()) - require.Error(t, err) - s, ok := status.FromError(err) - require.True(t, ok) - assert.Equal(t, s.Code(), codes.Internal) + requireErrorStatus(t, err, codes.Internal) } func TestV2StoreChunksValidationFailure(t *testing.T) { - c := newTestComponents(t, makeConfig(t)) + config := makeConfig(t) + config.EnableV2 = true + c := newTestComponents(t, config) blobKeys, batch, bundles := nodemock.MockBatch(t) batchProto, err := batch.ToProtobuf() @@ -286,25 +292,21 @@ func TestV2StoreChunksValidationFailure(t *testing.T) { Batch: batchProto, }) require.Nil(t, reply.GetSignature()) - require.Error(t, err) - s, ok := status.FromError(err) - require.True(t, ok) - assert.Equal(t, s.Code(), codes.Internal) + requireErrorStatus(t, err, codes.Internal) c.store.AssertCalled(t, "DeleteKeys", mock.Anything, mock.Anything) } func TestV2GetChunksInputValidation(t *testing.T) { - c := newTestComponents(t, makeConfig(t)) + config := makeConfig(t) + config.EnableV2 = true + c := newTestComponents(t, config) ctx := context.Background() req := &pbv2.GetChunksRequest{ BlobKey: []byte{0}, } _, err := c.server.GetChunks(ctx, req) - require.Error(t, err) - s, ok := 
status.FromError(err) - require.True(t, ok) - assert.Equal(t, s.Code(), codes.InvalidArgument) + requireErrorStatus(t, err, codes.InvalidArgument) bk := [32]byte{0} maxUInt32 := uint32(0xFFFFFFFF) @@ -313,10 +315,14 @@ func TestV2GetChunksInputValidation(t *testing.T) { QuorumId: maxUInt32, } _, err = c.server.GetChunks(ctx, req) + requireErrorStatus(t, err, codes.InvalidArgument) +} + +func requireErrorStatus(t *testing.T, err error, code codes.Code) { require.Error(t, err) - s, ok = status.FromError(err) + s, ok := status.FromError(err) require.True(t, ok) - assert.Equal(t, s.Code(), codes.InvalidArgument) + assert.Equal(t, s.Code(), code) } type mockKey struct{} diff --git a/node/node.go b/node/node.go index a54b76ac33..d22058c7a5 100644 --- a/node/node.go +++ b/node/node.go @@ -16,6 +16,7 @@ import ( "sync" "time" + "github.com/Layr-Labs/eigenda/common/kvstore/tablestore" "github.com/Layr-Labs/eigenda/common/pubip" "github.com/Layr-Labs/eigenda/encoding/kzg/verifier" @@ -216,7 +217,25 @@ func NewNode( "eigenDAServiceManagerAddr", config.EigenDAServiceManagerAddr, "blockStaleMeasure", blockStaleMeasure, "storeDurationBlocks", storeDurationBlocks, "enableGnarkBundleEncoding", config.EnableGnarkBundleEncoding) var relayClient clients.RelayClient - // Create a new relay client with relay addresses onchain + var storeV2 StoreV2 + if config.EnableV2 { + v2Path := config.DbPath + "/chunk_v2" + dbV2, err := tablestore.Start(logger, &tablestore.Config{ + Type: tablestore.LevelDB, + Path: &v2Path, + GarbageCollectionEnabled: true, + GarbageCollectionInterval: time.Duration(config.ExpirationPollIntervalSec) * time.Second, + GarbageCollectionBatchSize: 1024, + Schema: []string{BatchHeaderTableName, BlobCertificateTableName, BundleTableName}, + }) + if err != nil { + return nil, fmt.Errorf("failed to create new tablestore: %w", err) + } + storeV2 = NewLevelDBStoreV2(dbV2, logger) + + // TODO(ian-shim): Create a new relay client with relay addresses onchain + } + return 
&Node{ Config: config, Logger: nodeLogger, @@ -224,7 +243,7 @@ func NewNode( Metrics: metrics, NodeApi: nodeApi, Store: store, - StoreV2: nil, + StoreV2: storeV2, ChainState: cst, Transactor: tx, Validator: validator, diff --git a/node/store_v2.go b/node/store_v2.go index c5979e72fe..62da00f54d 100644 --- a/node/store_v2.go +++ b/node/store_v2.go @@ -41,6 +41,13 @@ func NewLevelDBStoreV2(db kvstore.TableStore, logger logging.Logger) *storeV2 { } func (s *storeV2) StoreBatch(batch *corev2.Batch, rawBundles []*RawBundles) ([]kvstore.Key, error) { + if len(rawBundles) == 0 { + return nil, fmt.Errorf("no raw bundles") + } + if len(rawBundles) != len(batch.BlobCertificates) { + return nil, fmt.Errorf("mismatch between raw bundles (%d) and blob certificates (%d)", len(rawBundles), len(batch.BlobCertificates)) + } + dbBatch := s.db.NewTTLBatch() keys := make([]kvstore.Key, 0) diff --git a/relay/metadata_provider.go b/relay/metadata_provider.go index 3e32924072..33407fa124 100644 --- a/relay/metadata_provider.go +++ b/relay/metadata_provider.go @@ -3,12 +3,13 @@ package relay import ( "context" "fmt" - "github.com/Layr-Labs/eigenda/core/v2" + "sync/atomic" + + v2 "github.com/Layr-Labs/eigenda/core/v2" "github.com/Layr-Labs/eigenda/disperser/common/v2/blobstore" "github.com/Layr-Labs/eigenda/encoding" "github.com/Layr-Labs/eigenda/relay/cache" "github.com/Layr-Labs/eigensdk-go/logging" - "sync/atomic" ) // Metadata about a blob. The relay only needs a small subset of a blob's metadata. @@ -79,8 +80,10 @@ func newMetadataProvider( type metadataMap map[v2.BlobKey]*blobMetadata // GetMetadataForBlobs retrieves metadata about multiple blobs in parallel. +// If any of the blobs do not exist, an error is returned. +// Note that resulting metadata map may not have the same length as the input +// keys slice if the input keys slice has duplicate items. 
func (m *metadataProvider) GetMetadataForBlobs(keys []v2.BlobKey) (metadataMap, error) { - // blobMetadataResult is the result of a metadata fetch operation. type blobMetadataResult struct { key v2.BlobKey @@ -94,7 +97,12 @@ func (m *metadataProvider) GetMetadataForBlobs(keys []v2.BlobKey) (metadataMap, // Set when the first error is encountered. Useful for preventing new operations from starting. hadError := atomic.Bool{} + mMap := make(metadataMap) for _, key := range keys { + mMap[key] = nil + } + + for key := range mMap { if hadError.Load() { // Don't bother starting new operations if we've already encountered an error. break @@ -122,8 +130,7 @@ func (m *metadataProvider) GetMetadataForBlobs(keys []v2.BlobKey) (metadataMap, }() } - mMap := make(metadataMap) - for len(mMap) < len(keys) { + for range mMap { result := <-completionChannel if result.err != nil { return nil, fmt.Errorf("error fetching metadata for blob %s: %w", result.key.Hex(), result.err) diff --git a/relay/metadata_provider_test.go b/relay/metadata_provider_test.go index e5586d901b..32e5a3e80c 100644 --- a/relay/metadata_provider_test.go +++ b/relay/metadata_provider_test.go @@ -2,14 +2,15 @@ package relay import ( "context" + "math/rand" + "testing" + "github.com/Layr-Labs/eigenda/common" tu "github.com/Layr-Labs/eigenda/common/testutils" - "github.com/Layr-Labs/eigenda/core/v2" + v2 "github.com/Layr-Labs/eigenda/core/v2" "github.com/Layr-Labs/eigenda/encoding" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "math/rand" - "testing" ) func TestGetNonExistentBlob(t *testing.T) { @@ -121,10 +122,12 @@ func TestBatchedFetch(t *testing.T) { // Write some metadata blobCount := 10 + blobKeys := make([]v2.BlobKey, blobCount) for i := 0; i < blobCount; i++ { header, _ := randomBlob(t) blobKey, err := header.BlobKey() require.NoError(t, err) + blobKeys[i] = blobKey totalChunkSizeBytes := uint32(rand.Intn(1024 * 1024 * 1024)) fragmentSizeBytes := uint32(rand.Intn(1024 * 1024)) 
@@ -179,6 +182,11 @@ func TestBatchedFetch(t *testing.T) { require.Equal(t, fragmentSizeMap[key], metadata.fragmentSizeBytes) } } + + // Test fetching with duplicate keys + mMap, err := server.GetMetadataForBlobs([]v2.BlobKey{blobKeys[0], blobKeys[0]}) + require.NoError(t, err) + require.Equal(t, 1, len(mMap)) } func TestIndividualFetchWithSharding(t *testing.T) { diff --git a/relay/server.go b/relay/server.go index ad6072b9fe..56bedea146 100644 --- a/relay/server.go +++ b/relay/server.go @@ -4,6 +4,9 @@ import ( "context" "errors" "fmt" + "net" + "time" + pb "github.com/Layr-Labs/eigenda/api/grpc/relay" "github.com/Layr-Labs/eigenda/common/healthcheck" "github.com/Layr-Labs/eigenda/core" @@ -15,8 +18,6 @@ import ( "github.com/Layr-Labs/eigensdk-go/logging" "google.golang.org/grpc" "google.golang.org/grpc/reflection" - "net" - "time" ) var _ pb.RelayServer = &Server{} @@ -208,6 +209,7 @@ func (s *Server) GetChunks(ctx context.Context, request *pb.GetChunksRequest) (* } defer s.chunkRateLimiter.FinishGetChunkOperation(clientID) + // keys might contain duplicate keys keys, err := getKeysFromChunkRequest(request) if err != nil { return nil, err @@ -273,7 +275,7 @@ func gatherChunkDataToSend( frames map[v2.BlobKey][]*encoding.Frame, request *pb.GetChunksRequest) ([][]byte, error) { - bytesToSend := make([][]byte, 0, len(frames)) + bytesToSend := make([][]byte, 0, len(request.ChunkRequests)) for _, chunkRequest := range request.ChunkRequests { diff --git a/relay/server_test.go b/relay/server_test.go index cedfa6ddb4..10a7ecca91 100644 --- a/relay/server_test.go +++ b/relay/server_test.go @@ -2,10 +2,11 @@ package relay import ( "context" - "github.com/Layr-Labs/eigenda/relay/limiter" "math/rand" "testing" + "github.com/Layr-Labs/eigenda/relay/limiter" + pb "github.com/Layr-Labs/eigenda/api/grpc/relay" "github.com/Layr-Labs/eigenda/common" tu "github.com/Layr-Labs/eigenda/common/testutils" @@ -1015,6 +1016,16 @@ func TestBatchedReadWriteChunksWithSharding(t *testing.T) { 
requestedChunks = append(requestedChunks, request) } + // Add a request for duplicate key with different index range + requestedChunks = append(requestedChunks, &pb.ChunkRequest{ + Request: &pb.ChunkRequest_ByRange{ + ByRange: &pb.ChunkRequestByRange{ + BlobKey: keys[0][:], + StartIndex: uint32(len(expectedData[keys[0]]) / 2), + EndIndex: uint32(len(expectedData[keys[0]])), + }, + }, + }) request := &pb.GetChunksRequest{ ChunkRequests: requestedChunks, } @@ -1036,11 +1047,10 @@ func TestBatchedReadWriteChunksWithSharding(t *testing.T) { } response, err := getChunks(t, request) - if allInCorrectShard { require.NoError(t, err) - require.Equal(t, keyCount, len(response.Data)) + require.Equal(t, keyCount+1, len(response.Data)) for keyIndex, key := range keys { data := expectedData[key] @@ -1052,6 +1062,17 @@ func TestBatchedReadWriteChunksWithSharding(t *testing.T) { require.Equal(t, data[frameIndex], frame) } } + + // Check the duplicate key + key := keys[0] + data := expectedData[key][len(expectedData[key])/2:] + + bundle, err := core.Bundle{}.Deserialize(response.Data[keyCount]) + require.NoError(t, err) + + for frameIndex, frame := range bundle { + require.Equal(t, data[frameIndex], frame) + } } else { require.Error(t, err) require.Nil(t, response) From fef9ec64dcf20553ca0695482df46e4fee64dabb Mon Sep 17 00:00:00 2001 From: Robert Raynor <35671663+mooselumph@users.noreply.github.com> Date: Tue, 19 Nov 2024 17:36:28 -0800 Subject: [PATCH 5/8] Use power of 2 for blob length (#912) --- api/clients/disperser_client_v2.go | 3 ++- core/v2/core_test.go | 2 +- core/v2/serialization_test.go | 14 +++++++------- core/v2/types_test.go | 6 +++--- disperser/apiserver/server_v2.go | 2 +- disperser/apiserver/server_v2_test.go | 6 +++--- encoding/encoding.go | 2 +- encoding/kzg/prover/cpu/multiframe_proof.go | 13 ++++++++++++- encoding/kzg/prover/parametrized_prover.go | 20 ++++++++++---------- encoding/kzg/prover/proof_device.go | 1 + encoding/kzg/prover/prover.go | 9 +++++---- 
encoding/mock/encoder.go | 2 +- encoding/params.go | 6 +++--- encoding/utils.go | 20 ++++++++++++-------- relay/relay_test_utils.go | 2 +- 15 files changed, 63 insertions(+), 45 deletions(-) diff --git a/api/clients/disperser_client_v2.go b/api/clients/disperser_client_v2.go index b7de5bebdb..e9d2e57f3e 100644 --- a/api/clients/disperser_client_v2.go +++ b/api/clients/disperser_client_v2.go @@ -149,7 +149,8 @@ func (c *disperserClientV2) DisperseBlob( blobCommitments = *deserialized } else { // if prover is configured, get commitments from prover - blobCommitments, err = c.prover.GetCommitments(data) + + blobCommitments, err = c.prover.GetCommitmentsForPaddedLength(data) if err != nil { return nil, [32]byte{}, fmt.Errorf("error getting blob commitments: %w", err) } diff --git a/core/v2/core_test.go b/core/v2/core_test.go index 7404ba92b4..54f4e02e45 100644 --- a/core/v2/core_test.go +++ b/core/v2/core_test.go @@ -94,7 +94,7 @@ func makeTestBlob(t *testing.T, p encoding.Prover, version corev2.BlobVersion, l data = codec.ConvertByPaddingEmptyByte(data) - commitments, err := p.GetCommitments(data) + commitments, err := p.GetCommitmentsForPaddedLength(data) if err != nil { t.Fatal(err) } diff --git a/core/v2/serialization_test.go b/core/v2/serialization_test.go index 36b57ee79e..20b4d830f0 100644 --- a/core/v2/serialization_test.go +++ b/core/v2/serialization_test.go @@ -34,7 +34,7 @@ func TestPaymentHash(t *testing.T) { func TestBlobKeyFromHeader(t *testing.T) { data := codec.ConvertByPaddingEmptyByte(GETTYSBURG_ADDRESS_BYTES) - commitments, err := p.GetCommitments(data) + commitments, err := p.GetCommitmentsForPaddedLength(data) if err != nil { t.Fatal(err) } @@ -52,8 +52,8 @@ func TestBlobKeyFromHeader(t *testing.T) { } blobKey, err := bh.BlobKey() assert.NoError(t, err) - // 0xb19d368345990c79744fe571fe99f427f35787b9383c55089fb5bd6a5c171bbc verified in solidity - assert.Equal(t, "b19d368345990c79744fe571fe99f427f35787b9383c55089fb5bd6a5c171bbc", blobKey.Hex()) + // 
0x40efb7273649f39590b27550ea06eeb81efd6ae4d719385a302fbd93173a395d verified in solidity + assert.Equal(t, "40efb7273649f39590b27550ea06eeb81efd6ae4d719385a302fbd93173a395d", blobKey.Hex()) } func TestBatchHeaderHash(t *testing.T) { @@ -87,7 +87,7 @@ func TestBatchHeaderSerialization(t *testing.T) { func TestBlobCertHash(t *testing.T) { data := codec.ConvertByPaddingEmptyByte(GETTYSBURG_ADDRESS_BYTES) - commitments, err := p.GetCommitments(data) + commitments, err := p.GetCommitmentsForPaddedLength(data) if err != nil { t.Fatal(err) } @@ -109,13 +109,13 @@ func TestBlobCertHash(t *testing.T) { hash, err := blobCert.Hash() assert.NoError(t, err) - // 0xc4512b8702f69cb837fff50a93d3d28aada535b1f151b64db45859c3f5bb096a verified in solidity - assert.Equal(t, "c4512b8702f69cb837fff50a93d3d28aada535b1f151b64db45859c3f5bb096a", hex.EncodeToString(hash[:])) + // 0x3719a91e2a294feafdd624c1c88a6f1db1a5c79ee0863b352255bc9162f02751 verified in solidity + assert.Equal(t, "3719a91e2a294feafdd624c1c88a6f1db1a5c79ee0863b352255bc9162f02751", hex.EncodeToString(hash[:])) } func TestBlobCertSerialization(t *testing.T) { data := codec.ConvertByPaddingEmptyByte(GETTYSBURG_ADDRESS_BYTES) - commitments, err := p.GetCommitments(data) + commitments, err := p.GetCommitmentsForPaddedLength(data) if err != nil { t.Fatal(err) } diff --git a/core/v2/types_test.go b/core/v2/types_test.go index 89ea5256ad..a5e25f0d10 100644 --- a/core/v2/types_test.go +++ b/core/v2/types_test.go @@ -12,7 +12,7 @@ import ( func TestConvertBatchToFromProtobuf(t *testing.T) { data := codec.ConvertByPaddingEmptyByte(GETTYSBURG_ADDRESS_BYTES) - commitments, err := p.GetCommitments(data) + commitments, err := p.GetCommitmentsForPaddedLength(data) if err != nil { t.Fatal(err) } @@ -68,7 +68,7 @@ func TestConvertBatchToFromProtobuf(t *testing.T) { func TestConvertBlobHeaderToFromProtobuf(t *testing.T) { data := codec.ConvertByPaddingEmptyByte(GETTYSBURG_ADDRESS_BYTES) - commitments, err := p.GetCommitments(data) + 
commitments, err := p.GetCommitmentsForPaddedLength(data) if err != nil { t.Fatal(err) } @@ -96,7 +96,7 @@ func TestConvertBlobHeaderToFromProtobuf(t *testing.T) { func TestConvertBlobCertToFromProtobuf(t *testing.T) { data := codec.ConvertByPaddingEmptyByte(GETTYSBURG_ADDRESS_BYTES) - commitments, err := p.GetCommitments(data) + commitments, err := p.GetCommitmentsForPaddedLength(data) if err != nil { t.Fatal(err) } diff --git a/disperser/apiserver/server_v2.go b/disperser/apiserver/server_v2.go index 06cb69b04e..77f22dec5a 100644 --- a/disperser/apiserver/server_v2.go +++ b/disperser/apiserver/server_v2.go @@ -160,7 +160,7 @@ func (s *DispersalServerV2) GetBlobCommitment(ctx context.Context, req *pb.BlobC if uint64(blobSize) > s.maxNumSymbolsPerBlob*encoding.BYTES_PER_SYMBOL { return nil, api.NewErrorInvalidArg(fmt.Sprintf("blob size cannot exceed %v bytes", s.maxNumSymbolsPerBlob*encoding.BYTES_PER_SYMBOL)) } - c, err := s.prover.GetCommitments(req.GetData()) + c, err := s.prover.GetCommitmentsForPaddedLength(req.GetData()) if err != nil { return nil, api.NewErrorInternal("failed to get commitments") } diff --git a/disperser/apiserver/server_v2_test.go b/disperser/apiserver/server_v2_test.go index 0c2697366f..e70fb3c823 100644 --- a/disperser/apiserver/server_v2_test.go +++ b/disperser/apiserver/server_v2_test.go @@ -51,7 +51,7 @@ func TestV2DisperseBlob(t *testing.T) { assert.NoError(t, err) data = codec.ConvertByPaddingEmptyByte(data) - commitments, err := prover.GetCommitments(data) + commitments, err := prover.GetCommitmentsForPaddedLength(data) assert.NoError(t, err) accountID, err := c.Signer.GetAccountID() assert.NoError(t, err) @@ -111,7 +111,7 @@ func TestV2DisperseBlobRequestValidation(t *testing.T) { assert.NoError(t, err) data = codec.ConvertByPaddingEmptyByte(data) - commitments, err := prover.GetCommitments(data) + commitments, err := prover.GetCommitmentsForPaddedLength(data) assert.NoError(t, err) accountID, err := c.Signer.GetAccountID() 
assert.NoError(t, err) @@ -313,7 +313,7 @@ func TestV2GetBlobCommitment(t *testing.T) { assert.NoError(t, err) data = codec.ConvertByPaddingEmptyByte(data) - commit, err := prover.GetCommitments(data) + commit, err := prover.GetCommitmentsForPaddedLength(data) require.NoError(t, err) reply, err := c.DispersalServerV2.GetBlobCommitment(context.Background(), &pbv2.BlobCommitmentRequest{ Data: data, diff --git a/encoding/encoding.go b/encoding/encoding.go index 221e851ad8..b4af4ac2db 100644 --- a/encoding/encoding.go +++ b/encoding/encoding.go @@ -12,7 +12,7 @@ type Prover interface { // reconstruct the blob. EncodeAndProve(data []byte, params EncodingParams) (BlobCommitments, []*Frame, error) - GetCommitments(data []byte) (BlobCommitments, error) + GetCommitmentsForPaddedLength(data []byte) (BlobCommitments, error) GetFrames(data []byte, params EncodingParams) ([]*Frame, error) diff --git a/encoding/kzg/prover/cpu/multiframe_proof.go b/encoding/kzg/prover/cpu/multiframe_proof.go index e06228f542..10e1084fa9 100644 --- a/encoding/kzg/prover/cpu/multiframe_proof.go +++ b/encoding/kzg/prover/cpu/multiframe_proof.go @@ -28,7 +28,17 @@ type WorkerResult struct { func (p *KzgCpuProofDevice) ComputeLengthProof(coeffs []fr.Element) (*bn254.G2Affine, error) { inputLength := uint64(len(coeffs)) - shiftedSecret := p.G2Trailing[p.KzgConfig.SRSNumberToLoad-inputLength:] + return p.ComputeLengthProofForLength(coeffs, inputLength) +} + +func (p *KzgCpuProofDevice) ComputeLengthProofForLength(coeffs []fr.Element, length uint64) (*bn254.G2Affine, error) { + + if length < uint64(len(coeffs)) { + return nil, fmt.Errorf("length is less than the number of coefficients") + } + + start := p.KzgConfig.SRSNumberToLoad - length + shiftedSecret := p.G2Trailing[start : start+uint64(len(coeffs))] config := ecc.MultiExpConfig{} //The proof of low degree is commitment of the polynomial shifted to the largest srs degree var lengthProof bn254.G2Affine @@ -37,6 +47,7 @@ func (p *KzgCpuProofDevice) 
ComputeLengthProof(coeffs []fr.Element) (*bn254.G2Af return nil, err } return &lengthProof, nil + } func (p *KzgCpuProofDevice) ComputeCommitment(coeffs []fr.Element) (*bn254.G1Affine, error) { diff --git a/encoding/kzg/prover/parametrized_prover.go b/encoding/kzg/prover/parametrized_prover.go index b30927ac5c..0af9ab91f7 100644 --- a/encoding/kzg/prover/parametrized_prover.go +++ b/encoding/kzg/prover/parametrized_prover.go @@ -32,19 +32,19 @@ type rsEncodeResult struct { } type lengthCommitmentResult struct { - LengthCommitment bn254.G2Affine + LengthCommitment *bn254.G2Affine Duration time.Duration Err error } type lengthProofResult struct { - LengthProof bn254.G2Affine + LengthProof *bn254.G2Affine Duration time.Duration Err error } type commitmentResult struct { - Commitment bn254.G1Affine + Commitment *bn254.G1Affine Duration time.Duration Err error } @@ -84,7 +84,7 @@ func (g *ParametrizedProver) Encode(inputFr []fr.Element) (*bn254.G1Affine, *bn2 // inputFr is untouched // compute chunks go func() { - commitment, lengthCommitment, lengthProof, err := g.GetCommitments(inputFr) + commitment, lengthCommitment, lengthProof, err := g.GetCommitments(inputFr, uint64(len(inputFr))) commitmentsChan <- commitmentsResult{ commitment: commitment, @@ -112,7 +112,7 @@ func (g *ParametrizedProver) Encode(inputFr []fr.Element) (*bn254.G1Affine, *bn2 return commitmentResult.commitment, commitmentResult.lengthCommitment, commitmentResult.lengthProof, frames, indices, nil } -func (g *ParametrizedProver) GetCommitments(inputFr []fr.Element) (*bn254.G1Affine, *bn254.G2Affine, *bn254.G2Affine, error) { +func (g *ParametrizedProver) GetCommitments(inputFr []fr.Element, length uint64) (*bn254.G1Affine, *bn254.G2Affine, *bn254.G2Affine, error) { if err := g.validateInput(inputFr); err != nil { return nil, nil, nil, err } @@ -128,7 +128,7 @@ func (g *ParametrizedProver) GetCommitments(inputFr []fr.Element) (*bn254.G1Affi start := time.Now() commit, err := 
g.Computer.ComputeCommitment(inputFr) commitmentChan <- commitmentResult{ - Commitment: *commit, + Commitment: commit, Err: err, Duration: time.Since(start), } @@ -138,7 +138,7 @@ func (g *ParametrizedProver) GetCommitments(inputFr []fr.Element) (*bn254.G1Affi start := time.Now() lengthCommitment, err := g.Computer.ComputeLengthCommitment(inputFr) lengthCommitmentChan <- lengthCommitmentResult{ - LengthCommitment: *lengthCommitment, + LengthCommitment: lengthCommitment, Err: err, Duration: time.Since(start), } @@ -146,9 +146,9 @@ func (g *ParametrizedProver) GetCommitments(inputFr []fr.Element) (*bn254.G1Affi go func() { start := time.Now() - lengthProof, err := g.Computer.ComputeLengthProof(inputFr) + lengthProof, err := g.Computer.ComputeLengthProofForLength(inputFr, length) lengthProofChan <- lengthProofResult{ - LengthProof: *lengthProof, + LengthProof: lengthProof, Err: err, Duration: time.Since(start), } @@ -175,7 +175,7 @@ func (g *ParametrizedProver) GetCommitments(inputFr []fr.Element) (*bn254.G1Affi if g.Verbose { log.Printf("Total encoding took %v\n", totalProcessingTime) } - return &commitmentResult.Commitment, &lengthCommitmentResult.LengthCommitment, &lengthProofResult.LengthProof, nil + return commitmentResult.Commitment, lengthCommitmentResult.LengthCommitment, lengthProofResult.LengthProof, nil } func (g *ParametrizedProver) GetFrames(inputFr []fr.Element) ([]encoding.Frame, []uint32, error) { diff --git a/encoding/kzg/prover/proof_device.go b/encoding/kzg/prover/proof_device.go index 4d6ff8cb30..b08f5196dd 100644 --- a/encoding/kzg/prover/proof_device.go +++ b/encoding/kzg/prover/proof_device.go @@ -12,4 +12,5 @@ type ProofDevice interface { ComputeMultiFrameProof(blobFr []fr.Element, numChunks, chunkLen, numWorker uint64) ([]bn254.G1Affine, error) ComputeLengthCommitment(blobFr []fr.Element) (*bn254.G2Affine, error) ComputeLengthProof(blobFr []fr.Element) (*bn254.G2Affine, error) + ComputeLengthProofForLength(blobFr []fr.Element, length uint64) 
(*bn254.G2Affine, error) } diff --git a/encoding/kzg/prover/prover.go b/encoding/kzg/prover/prover.go index d315c04b8d..a6aa5dea83 100644 --- a/encoding/kzg/prover/prover.go +++ b/encoding/kzg/prover/prover.go @@ -198,7 +198,7 @@ func (e *Prover) GetFrames(data []byte, params encoding.EncodingParams) ([]*enco return chunks, nil } -func (e *Prover) GetCommitments(data []byte) (encoding.BlobCommitments, error) { +func (e *Prover) GetCommitmentsForPaddedLength(data []byte) (encoding.BlobCommitments, error) { symbols, err := rs.ToFrArray(data) if err != nil { return encoding.BlobCommitments{}, err @@ -214,17 +214,18 @@ func (e *Prover) GetCommitments(data []byte) (encoding.BlobCommitments, error) { return encoding.BlobCommitments{}, err } - commit, lengthCommit, lengthProof, err := enc.GetCommitments(symbols) + length := encoding.NextPowerOf2(uint64(len(symbols))) + + commit, lengthCommit, lengthProof, err := enc.GetCommitments(symbols, length) if err != nil { return encoding.BlobCommitments{}, err } - length := uint(len(symbols)) commitments := encoding.BlobCommitments{ Commitment: (*encoding.G1Commitment)(commit), LengthCommitment: (*encoding.G2Commitment)(lengthCommit), LengthProof: (*encoding.G2Commitment)(lengthProof), - Length: length, + Length: uint(length), } return commitments, nil diff --git a/encoding/mock/encoder.go b/encoding/mock/encoder.go index 87ed90d026..ae327acf85 100644 --- a/encoding/mock/encoder.go +++ b/encoding/mock/encoder.go @@ -23,7 +23,7 @@ func (e *MockEncoder) EncodeAndProve(data []byte, params encoding.EncodingParams return args.Get(0).(encoding.BlobCommitments), args.Get(1).([]*encoding.Frame), args.Error(2) } -func (e *MockEncoder) GetCommitments(data []byte) (encoding.BlobCommitments, error) { +func (e *MockEncoder) GetCommitmentsForPaddedLength(data []byte) (encoding.BlobCommitments, error) { args := e.Called(data) time.Sleep(e.Delay) return args.Get(0).(encoding.BlobCommitments), args.Error(1) diff --git a/encoding/params.go 
b/encoding/params.go index 422fa0799c..b9150f5dfa 100644 --- a/encoding/params.go +++ b/encoding/params.go @@ -47,14 +47,14 @@ func ParamsFromMins[T constraints.Integer](minChunkLength, minNumChunks T) Encod func ParamsFromSysPar(numSys, numPar, dataSize uint64) EncodingParams { numNodes := numSys + numPar - dataLen := roundUpDivide(dataSize, BYTES_PER_SYMBOL) - chunkLen := roundUpDivide(dataLen, numSys) + dataLen := RoundUpDivide(dataSize, BYTES_PER_SYMBOL) + chunkLen := RoundUpDivide(dataLen, numSys) return ParamsFromMins(chunkLen, numNodes) } func GetNumSys(dataSize uint64, chunkLen uint64) uint64 { - dataLen := roundUpDivide(dataSize, BYTES_PER_SYMBOL) + dataLen := RoundUpDivide(dataSize, BYTES_PER_SYMBOL) numSys := dataLen / chunkLen return numSys } diff --git a/encoding/utils.go b/encoding/utils.go index 8af967648d..fe665fe7d5 100644 --- a/encoding/utils.go +++ b/encoding/utils.go @@ -8,8 +8,12 @@ import ( // GetBlobLength converts from blob size in bytes to blob size in symbols func GetBlobLength(blobSize uint) uint { - symSize := uint(BYTES_PER_SYMBOL) - return (blobSize + symSize - 1) / symSize + return RoundUpDivide(blobSize, BYTES_PER_SYMBOL) +} + +// GetBlobLength converts from blob size in bytes to blob size in symbols +func GetBlobLengthPowerOf2(blobSize uint) uint { + return NextPowerOf2(GetBlobLength(blobSize)) } // GetBlobSize converts from blob length in symbols to blob size in bytes. This is not an exact conversion. 
@@ -19,14 +23,14 @@ func GetBlobSize(blobLength uint) uint { // GetBlobLength converts from blob size in bytes to blob size in symbols func GetEncodedBlobLength(blobLength uint, quorumThreshold, advThreshold uint8) uint { - return roundUpDivide(blobLength*100, uint(quorumThreshold-advThreshold)) + return RoundUpDivide(blobLength*100, uint(quorumThreshold-advThreshold)) } -func NextPowerOf2(d uint64) uint64 { - nextPower := math.Ceil(math.Log2(float64(d))) - return uint64(math.Pow(2.0, nextPower)) +func RoundUpDivide[T constraints.Integer](a, b T) T { + return (a + b - 1) / b } -func roundUpDivide[T constraints.Integer](a, b T) T { - return (a + b - 1) / b +func NextPowerOf2[T constraints.Integer](d T) T { + nextPower := math.Ceil(math.Log2(float64(d))) + return T(math.Pow(2.0, nextPower)) } diff --git a/relay/relay_test_utils.go b/relay/relay_test_utils.go index f850b65cc7..9e99abe929 100644 --- a/relay/relay_test_utils.go +++ b/relay/relay_test_utils.go @@ -181,7 +181,7 @@ func randomBlob(t *testing.T) (*v2.BlobHeader, []byte) { data := tu.RandomBytes(225) // TODO talk to Ian about this data = codec.ConvertByPaddingEmptyByte(data) - commitments, err := prover.GetCommitments(data) + commitments, err := prover.GetCommitmentsForPaddedLength(data) require.NoError(t, err) require.NoError(t, err) commitmentProto, err := commitments.ToProtobuf() From eef48b67691f76f1565a904a1ec9b4674b169ad4 Mon Sep 17 00:00:00 2001 From: Gwenall Date: Wed, 20 Nov 2024 02:54:30 +0100 Subject: [PATCH 6/8] Fix Subgraph README.md typo (#890) --- subgraphs/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/subgraphs/README.md b/subgraphs/README.md index c6a39bd55e..e8cebd5a0a 100644 --- a/subgraphs/README.md +++ b/subgraphs/README.md @@ -18,7 +18,7 @@ yarn global add @graphprotocol/graph-cli # install if u haven't # or install on MacOS npm install -g @graphprotocol/graph-cli -graph init ----from-contract --network {goerli,mainnet} --abi abis/Contract.json +graph 
init --from-contract --network {goerli,mainnet} --abi abis/Contract.json ``` And go through the dialog. @@ -46,4 +46,4 @@ npm run codegen ## Develop more -Check out the [graph docs](https://thegraph.com/docs/en/network/overview/). \ No newline at end of file +Check out the [graph docs](https://thegraph.com/docs/en/network/overview/). From f7ab152818b22e34eaab8f41f3aab6e9c714f25c Mon Sep 17 00:00:00 2001 From: Ian Shim <100327837+ian-shim@users.noreply.github.com> Date: Wed, 20 Nov 2024 10:30:32 -0800 Subject: [PATCH 7/8] [v2] Remove rate limiter from disperser (#909) --- disperser/apiserver/disperse_blob_v2.go | 8 +---- disperser/apiserver/server_v2.go | 39 ------------------------- disperser/cmd/controller/config.go | 2 -- 3 files changed, 1 insertion(+), 48 deletions(-) diff --git a/disperser/apiserver/disperse_blob_v2.go b/disperser/apiserver/disperse_blob_v2.go index d39d75f2cd..304724eb64 100644 --- a/disperser/apiserver/disperse_blob_v2.go +++ b/disperser/apiserver/disperse_blob_v2.go @@ -7,7 +7,6 @@ import ( "github.com/Layr-Labs/eigenda/api" pb "github.com/Layr-Labs/eigenda/api/grpc/disperser/v2" - "github.com/Layr-Labs/eigenda/common" corev2 "github.com/Layr-Labs/eigenda/core/v2" dispv2 "github.com/Layr-Labs/eigenda/disperser/common/v2" "github.com/Layr-Labs/eigenda/encoding" @@ -19,17 +18,12 @@ func (s *DispersalServerV2) DisperseBlob(ctx context.Context, req *pb.DisperseBl return nil, err } - origin, err := common.GetClientAddress(ctx, s.rateConfig.ClientIPHeader, 2, true) - if err != nil { - return nil, api.NewErrorInvalidArg(err.Error()) - } - data := req.GetData() blobHeader, err := corev2.BlobHeaderFromProtobuf(req.GetBlobHeader()) if err != nil { return nil, api.NewErrorInternal(err.Error()) } - s.logger.Debug("received a new blob dispersal request", "origin", origin, "blobSizeBytes", len(data), "quorums", req.GetBlobHeader().GetQuorumNumbers()) + s.logger.Debug("received a new blob dispersal request", "blobSizeBytes", len(data), "quorums", 
req.GetBlobHeader().GetQuorumNumbers()) // TODO(ian-shim): handle payments and check rate limits diff --git a/disperser/apiserver/server_v2.go b/disperser/apiserver/server_v2.go index 77f22dec5a..e3cbf733f2 100644 --- a/disperser/apiserver/server_v2.go +++ b/disperser/apiserver/server_v2.go @@ -33,12 +33,10 @@ type DispersalServerV2 struct { pb.UnimplementedDisperserServer serverConfig disperser.ServerConfig - rateConfig RateConfig blobStore *blobstore.BlobStore blobMetadataStore *blobstore.BlobMetadataStore chainReader core.Reader - ratelimiter common.RateLimiter authenticator corev2.BlobRequestAuthenticator prover encoding.Prover logger logging.Logger @@ -67,12 +65,10 @@ func NewDispersalServerV2( return &DispersalServerV2{ serverConfig: serverConfig, - rateConfig: rateConfig, blobStore: blobStore, blobMetadataStore: blobMetadataStore, chainReader: chainReader, - ratelimiter: ratelimiter, authenticator: authenticator, prover: prover, logger: logger, @@ -105,10 +101,6 @@ func (s *DispersalServerV2) Start(ctx context.Context) error { return fmt.Errorf("failed to refresh onchain quorum state: %w", err) } - if err := s.RefreshAllowlist(); err != nil { - return fmt.Errorf("failed to refresh allowlist: %w", err) - } - go func() { ticker := time.NewTicker(s.onchainStateRefreshInterval) defer ticker.Stop() @@ -125,21 +117,6 @@ func (s *DispersalServerV2) Start(ctx context.Context) error { } }() - go func() { - t := time.NewTicker(s.rateConfig.AllowlistRefreshInterval) - defer t.Stop() - for { - select { - case <-t.C: - if err := s.RefreshAllowlist(); err != nil { - s.logger.Error("failed to refresh allowlist", "err", err) - } - case <-ctx.Done(): - return - } - } - }() - s.logger.Info("GRPC Listening", "port", s.serverConfig.GrpcPort, "address", listener.Addr().String()) if err := gs.Serve(listener); err != nil { @@ -186,22 +163,6 @@ func (s *DispersalServerV2) GetBlobCommitment(ctx context.Context, req *pb.BlobC }}, nil } -func (s *DispersalServerV2) RefreshAllowlist() 
error { - s.logger.Debug("Refreshing onchain quorum state") - al, err := ReadAllowlistFromFile(s.rateConfig.AllowlistFile) - if err != nil { - return fmt.Errorf("failed to load allowlist: %w", err) - } - s.rateConfig.Allowlist = al - for account, rateInfoByQuorum := range al { - for quorumID, rateInfo := range rateInfoByQuorum { - s.logger.Info("[Allowlist]", "account", account, "name", rateInfo.Name, "quorumID", quorumID, "throughput", rateInfo.Throughput, "blobRate", rateInfo.BlobRate) - } - } - - return nil -} - // refreshOnchainState refreshes the onchain quorum state. // It should be called periodically to keep the state up to date. // **Note** that there is no lock. If the state is being updated concurrently, it may lead to inconsistent state. diff --git a/disperser/cmd/controller/config.go b/disperser/cmd/controller/config.go index 1a5ded7edd..00483ee202 100644 --- a/disperser/cmd/controller/config.go +++ b/disperser/cmd/controller/config.go @@ -31,7 +31,6 @@ type Config struct { IndexerConfig indexer.Config ChainStateConfig thegraph.Config UseGraph bool - IndexerDataDir string BLSOperatorStateRetrieverAddr string EigenDAServiceManagerAddr string @@ -84,7 +83,6 @@ func NewConfig(ctx *cli.Context) (Config, error) { IndexerConfig: indexer.ReadIndexerConfig(ctx), ChainStateConfig: thegraph.ReadCLIConfig(ctx), UseGraph: ctx.GlobalBool(flags.UseGraphFlag.Name), - IndexerDataDir: ctx.GlobalString(flags.IndexerDataDirFlag.Name), BLSOperatorStateRetrieverAddr: ctx.GlobalString(flags.BlsOperatorStateRetrieverFlag.Name), EigenDAServiceManagerAddr: ctx.GlobalString(flags.EigenDAServiceManagerFlag.Name), From 5ce2098c1e97ef1a2dd2b79824a0fb49e9dd38a8 Mon Sep 17 00:00:00 2001 From: quaq <56312047+0x0aa0@users.noreply.github.com> Date: Wed, 20 Nov 2024 16:11:54 -0600 Subject: [PATCH 8/8] V2 interfaces (#920) --- contracts/script/EigenDADeployer.s.sol | 6 +- contracts/script/GenerateUnitTestHashes.s.sol | 12 +- contracts/src/core/EigenDAServiceManager.sol | 57 ++++++- 
.../src/core/EigenDAServiceManagerStorage.sol | 36 ++--- .../IEigenDABatchMetadataStorage.sol | 6 + .../src/interfaces/IEigenDABlobVerifier.sol | 141 ++++++++++++++++++ .../src/interfaces/IEigenDARelayRegistry.sol | 15 ++ .../src/interfaces/IEigenDAServiceManager.sol | 61 +------- .../interfaces/IEigenDASignatureVerifier.sol | 13 ++ contracts/src/interfaces/IEigenDAStructs.sol | 128 ++++++++++++++++ .../interfaces/IEigenDAThresholdRegistry.sol | 37 +++++ contracts/src/libraries/EigenDAHasher.sol | 38 +++-- .../src/libraries/EigenDARollupUtils.sol | 14 +- contracts/src/rollup/MockRollup.sol | 5 +- .../harnesses/EigenDABlobUtilsHarness.sol | 9 +- contracts/test/unit/EigenDABlobUtils.t.sol | 92 ++++++------ .../test/unit/EigenDAServiceManagerUnit.t.sol | 35 +++-- contracts/test/unit/MockRollup.t.sol | 34 +++-- 18 files changed, 549 insertions(+), 190 deletions(-) create mode 100644 contracts/src/interfaces/IEigenDABatchMetadataStorage.sol create mode 100644 contracts/src/interfaces/IEigenDABlobVerifier.sol create mode 100644 contracts/src/interfaces/IEigenDARelayRegistry.sol create mode 100644 contracts/src/interfaces/IEigenDASignatureVerifier.sol create mode 100644 contracts/src/interfaces/IEigenDAStructs.sol create mode 100644 contracts/src/interfaces/IEigenDAThresholdRegistry.sol diff --git a/contracts/script/EigenDADeployer.s.sol b/contracts/script/EigenDADeployer.s.sol index 6abb1cc4c2..245fc8145b 100644 --- a/contracts/script/EigenDADeployer.s.sol +++ b/contracts/script/EigenDADeployer.s.sol @@ -17,6 +17,8 @@ import {IBLSApkRegistry} from "eigenlayer-middleware/interfaces/IBLSApkRegistry. 
import {EigenDAServiceManager, IAVSDirectory, IRewardsCoordinator} from "../src/core/EigenDAServiceManager.sol"; import {EigenDAHasher} from "../src/libraries/EigenDAHasher.sol"; import {ISocketRegistry, SocketRegistry} from "eigenlayer-middleware/SocketRegistry.sol"; +import {IEigenDAThresholdRegistry} from "../src/interfaces/IEigenDAThresholdRegistry.sol"; +import {IEigenDARelayRegistry} from "../src/interfaces/IEigenDARelayRegistry.sol"; import {DeployOpenEigenLayer, ProxyAdmin, ERC20PresetFixedSupply, TransparentUpgradeableProxy, IPauserRegistry} from "./DeployOpenEigenLayer.s.sol"; import "forge-std/Test.sol"; @@ -202,7 +204,9 @@ contract EigenDADeployer is DeployOpenEigenLayer { avsDirectory, rewardsCoordinator, registryCoordinator, - stakeRegistry + stakeRegistry, + IEigenDAThresholdRegistry(address(0)), + IEigenDARelayRegistry(address(0)) ); address[] memory confirmers = new address[](1); diff --git a/contracts/script/GenerateUnitTestHashes.s.sol b/contracts/script/GenerateUnitTestHashes.s.sol index d302a65e37..6ae47829ca 100644 --- a/contracts/script/GenerateUnitTestHashes.s.sol +++ b/contracts/script/GenerateUnitTestHashes.s.sol @@ -5,7 +5,7 @@ import "../src/interfaces/IEigenDAServiceManager.sol"; import "forge-std/Script.sol"; import "forge-std/console.sol"; - +import "../src/interfaces/IEigenDAStructs.sol"; // # To generate the hashes needed for core/serialization_test.go: // forge script script/GenerateUnitTestHashes.s.sol -v @@ -18,9 +18,9 @@ contract GenerateHashes is Script { function run() external { - IEigenDAServiceManager.QuorumBlobParam[] memory quorumBlobParam = new IEigenDAServiceManager.QuorumBlobParam[](1); + QuorumBlobParam[] memory quorumBlobParam = new QuorumBlobParam[](1); - quorumBlobParam[0] = IEigenDAServiceManager.QuorumBlobParam({ + quorumBlobParam[0] = QuorumBlobParam({ quorumNumber: 0, adversaryThresholdPercentage: 80, confirmationThresholdPercentage: 100, @@ -37,14 +37,14 @@ contract GenerateHashes is Script { }); - 
quorumBlobParam[0] = IEigenDAServiceManager.QuorumBlobParam({ + quorumBlobParam[0] = QuorumBlobParam({ quorumNumber: 1, adversaryThresholdPercentage: 80, confirmationThresholdPercentage: 100, chunkLength: 10 }); - IEigenDAServiceManager.BlobHeader memory header = IEigenDAServiceManager.BlobHeader({ + BlobHeader memory header = BlobHeader({ commitment: commitment, dataLength: 10, quorumBlobParams: quorumBlobParam @@ -59,4 +59,4 @@ contract GenerateHashes is Script { } -} +} \ No newline at end of file diff --git a/contracts/src/core/EigenDAServiceManager.sol b/contracts/src/core/EigenDAServiceManager.sol index 4fbdfc5f3a..f49c3206df 100644 --- a/contracts/src/core/EigenDAServiceManager.sol +++ b/contracts/src/core/EigenDAServiceManager.sol @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: UNLICENSED +// SPDX-License-Identifier: MIT pragma solidity ^0.8.9; import {Pausable} from "eigenlayer-core/contracts/permissions/Pausable.sol"; @@ -8,9 +8,11 @@ import {ServiceManagerBase, IAVSDirectory, IRewardsCoordinator, IServiceManager} import {BLSSignatureChecker} from "eigenlayer-middleware/BLSSignatureChecker.sol"; import {IRegistryCoordinator} from "eigenlayer-middleware/interfaces/IRegistryCoordinator.sol"; import {IStakeRegistry} from "eigenlayer-middleware/interfaces/IStakeRegistry.sol"; - +import {IEigenDAThresholdRegistry} from "../interfaces/IEigenDAThresholdRegistry.sol"; +import {IEigenDARelayRegistry} from "../interfaces/IEigenDARelayRegistry.sol"; import {EigenDAServiceManagerStorage} from "./EigenDAServiceManagerStorage.sol"; import {EigenDAHasher} from "../libraries/EigenDAHasher.sol"; +import "../interfaces/IEigenDAStructs.sol"; /** * @title Primary entrypoint for procuring services from EigenDA. 
@@ -36,10 +38,13 @@ contract EigenDAServiceManager is EigenDAServiceManagerStorage, ServiceManagerBa IAVSDirectory __avsDirectory, IRewardsCoordinator __rewardsCoordinator, IRegistryCoordinator __registryCoordinator, - IStakeRegistry __stakeRegistry + IStakeRegistry __stakeRegistry, + IEigenDAThresholdRegistry __eigenDAThresholdRegistry, + IEigenDARelayRegistry __eigenDARelayRegistry ) BLSSignatureChecker(__registryCoordinator) ServiceManagerBase(__avsDirectory, __rewardsCoordinator, __registryCoordinator, __stakeRegistry) + EigenDAServiceManagerStorage(__eigenDAThresholdRegistry, __eigenDARelayRegistry) { _disableInitializers(); } @@ -110,7 +115,7 @@ contract EigenDAServiceManager is EigenDAServiceManagerStorage, ServiceManagerBa // signed stake > total stake require( quorumStakeTotals.signedStakeForQuorum[i] * THRESHOLD_DENOMINATOR >= - quorumStakeTotals.totalStakeForQuorum[i] * uint8(batchHeader.signedStakeForQuorums[i]), + quorumStakeTotals.totalStakeForQuorum[i] * uint8(batchHeader.signedStakeForQuorums[i]), "EigenDAServiceManager.confirmBatch: signatories do not own at least threshold percentage of a quorum" ); } @@ -147,4 +152,48 @@ contract EigenDAServiceManager is EigenDAServiceManagerStorage, ServiceManagerBa return referenceBlockNumber + STORE_DURATION_BLOCKS + BLOCK_STALE_MEASURE; } + /// @notice Returns the blob params for a given blob version + function getBlobParams(uint16 version) external view returns (VersionedBlobParams memory) { + return eigenDAThresholdRegistry.getBlobParams(version); + } + + /// @notice Returns the bytes array of quorumAdversaryThresholdPercentages + function quorumAdversaryThresholdPercentages() external view returns (bytes memory) { + return hex"212121"; + } + + /// @notice Returns the bytes array of quorumAdversaryThresholdPercentages + function quorumConfirmationThresholdPercentages() external view returns (bytes memory) { + return hex"373737"; + } + + /// @notice Returns the bytes array of quorumsNumbersRequired + 
function quorumNumbersRequired() external view returns (bytes memory) { + return hex"0001"; + } + + function getQuorumAdversaryThresholdPercentage( + uint8 quorumNumber + ) external view returns (uint8){ + return eigenDAThresholdRegistry.getQuorumAdversaryThresholdPercentage(quorumNumber); + } + + /// @notice Gets the confirmation threshold percentage for a quorum + function getQuorumConfirmationThresholdPercentage( + uint8 quorumNumber + ) external view returns (uint8){ + return eigenDAThresholdRegistry.getQuorumConfirmationThresholdPercentage(quorumNumber); + } + + /// @notice Checks if a quorum is required + function getIsQuorumRequired( + uint8 quorumNumber + ) external view returns (bool){ + return eigenDAThresholdRegistry.getIsQuorumRequired(quorumNumber); + } + + /// @notice Gets the default security thresholds for V2 + function getDefaultSecurityThresholdsV2() external view returns (SecurityThresholds memory) { + return eigenDAThresholdRegistry.getDefaultSecurityThresholdsV2(); + } } \ No newline at end of file diff --git a/contracts/src/core/EigenDAServiceManagerStorage.sol b/contracts/src/core/EigenDAServiceManagerStorage.sol index 488f9ac60f..843a1107c7 100644 --- a/contracts/src/core/EigenDAServiceManagerStorage.sol +++ b/contracts/src/core/EigenDAServiceManagerStorage.sol @@ -1,8 +1,9 @@ -// SPDX-License-Identifier: UNLICENSED +// SPDX-License-Identifier: MIT pragma solidity ^0.8.9; import {IEigenDAServiceManager} from "../interfaces/IEigenDAServiceManager.sol"; - +import {IEigenDAThresholdRegistry} from "../interfaces/IEigenDAThresholdRegistry.sol"; +import {IEigenDARelayRegistry} from "../interfaces/IEigenDARelayRegistry.sol"; /** * @title Storage variables for the `EigenDAServiceManager` contract. * @author Layr Labs, Inc. @@ -12,7 +13,6 @@ abstract contract EigenDAServiceManagerStorage is IEigenDAServiceManager { // CONSTANTS uint256 public constant THRESHOLD_DENOMINATOR = 100; - //TODO: mechanism to change any of these values? 
/// @notice Unit of measure (in blocks) for which data will be stored for after confirmation. uint32 public constant STORE_DURATION_BLOCKS = 2 weeks / 12 seconds; @@ -36,26 +36,16 @@ abstract contract EigenDAServiceManagerStorage is IEigenDAServiceManager { */ uint32 public constant BLOCK_STALE_MEASURE = 300; - /** - * @notice The quorum adversary threshold percentages stored as an ordered bytes array - * this is the percentage of the total stake that must be adversarial to consider a blob invalid. - * The first byte is the threshold for quorum 0, the second byte is the threshold for quorum 1, etc. - */ - bytes public constant quorumAdversaryThresholdPercentages = hex"212121"; - - /** - * @notice The quorum confirmation threshold percentages stored as an ordered bytes array - * this is the percentage of the total stake needed to confirm a blob. - * The first byte is the threshold for quorum 0, the second byte is the threshold for quorum 1, etc. - */ - bytes public constant quorumConfirmationThresholdPercentages = hex"373737"; + IEigenDAThresholdRegistry public immutable eigenDAThresholdRegistry; + IEigenDARelayRegistry public immutable eigenDARelayRegistry; - /** - * @notice The quorum numbers required for confirmation stored as an ordered bytes array - * these quorum numbers have respective canonical thresholds in the - * quorumConfirmationThresholdPercentages and quorumAdversaryThresholdPercentages above. 
- */ - bytes public constant quorumNumbersRequired = hex"0001"; + constructor( + IEigenDAThresholdRegistry _eigenDAThresholdRegistry, + IEigenDARelayRegistry _eigenDARelayRegistry + ) { + eigenDAThresholdRegistry = _eigenDAThresholdRegistry; + eigenDARelayRegistry = _eigenDARelayRegistry; + } /// @notice The current batchId uint32 public batchId; @@ -69,4 +59,4 @@ abstract contract EigenDAServiceManagerStorage is IEigenDAServiceManager { // storage gap for upgradeability // slither-disable-next-line shadowing-state uint256[47] private __GAP; -} +} \ No newline at end of file diff --git a/contracts/src/interfaces/IEigenDABatchMetadataStorage.sol b/contracts/src/interfaces/IEigenDABatchMetadataStorage.sol new file mode 100644 index 0000000000..1904c505c7 --- /dev/null +++ b/contracts/src/interfaces/IEigenDABatchMetadataStorage.sol @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.9; + +interface IEigenDABatchMetadataStorage { + function batchIdToBatchMetadataHash(uint32 batchId) external view returns (bytes32); +} \ No newline at end of file diff --git a/contracts/src/interfaces/IEigenDABlobVerifier.sol b/contracts/src/interfaces/IEigenDABlobVerifier.sol new file mode 100644 index 0000000000..7cca2aa71c --- /dev/null +++ b/contracts/src/interfaces/IEigenDABlobVerifier.sol @@ -0,0 +1,141 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.9; + +import "./IEigenDAStructs.sol"; +import "./IEigenDAThresholdRegistry.sol"; + +interface IEigenDABlobVerifier is IEigenDAThresholdRegistry { + + /** + * @notice Verifies a the blob is valid for the required quorums + * @param blobHeader The blob header to verify + * @param blobVerificationProof The blob verification proof to verify the blob against + */ + function verifyBlobV1( + BlobHeader calldata blobHeader, + BlobVerificationProof calldata blobVerificationProof + ) external view; + + /** + * @notice Verifies that a blob is valid for the required quorums and additional quorums + * @param blobHeader 
The blob header to verify + * @param blobVerificationProof The blob verification proof to verify the blob against + * @param additionalQuorumNumbersRequired The additional required quorum numbers + */ + function verifyBlobV1( + BlobHeader calldata blobHeader, + BlobVerificationProof calldata blobVerificationProof, + bytes calldata additionalQuorumNumbersRequired + ) external view; + + /** + * @notice Verifies a batch of blobs for the required quorums + * @param blobHeaders The blob headers to verify + * @param blobVerificationProofs The blob verification proofs to verify the blobs against + */ + function verifyBlobsV1( + BlobHeader[] calldata blobHeaders, + BlobVerificationProof[] calldata blobVerificationProofs + ) external view; + + /** + * @notice Verifies a batch of blobs for the required quorums and additional quorums + * @param blobHeaders The blob headers to verify + * @param blobVerificationProofs The blob verification proofs to verify the blobs against + * @param additionalQuorumNumbersRequired The additional required quorum numbers + */ + function verifyBlobsV1( + BlobHeader[] calldata blobHeaders, + BlobVerificationProof[] calldata blobVerificationProofs, + bytes calldata additionalQuorumNumbersRequired + ) external view; + + + /** + * @notice Verifies a blob for the required quorums and the default security thresholds + * @param batchHeader The batch header of the blob + * @param blobVerificationProof The blob verification proof for the blob + * @param nonSignerStakesAndSignature The nonSignerStakesAndSignature to verify the blob against + */ + function verifyBlobV2( + BatchHeaderV2 calldata batchHeader, + BlobVerificationProofV2 calldata blobVerificationProof, + NonSignerStakesAndSignature calldata nonSignerStakesAndSignature + ) external view; + + /** + * @notice Verifies a blob for the required quorums and additional quorums + * @param batchHeader The batch header of the blob + * @param blobVerificationProof The blob verification proof for the blob + 
* @param nonSignerStakesAndSignature The nonSignerStakesAndSignature to verify the blob against + * @param additionalQuorumNumbersRequired The additional required quorum numbers + */ + function verifyBlobV2( + BatchHeaderV2 calldata batchHeader, + BlobVerificationProofV2 calldata blobVerificationProof, + NonSignerStakesAndSignature calldata nonSignerStakesAndSignature, + bytes calldata additionalQuorumNumbersRequired + ) external view; + + /** + * @notice Verifies a blob for the required quorums and additional quorums and a custom security threshold + * @param batchHeader The batch header of the blob + * @param blobVerificationProof The blob verification proof for the blob + * @param nonSignerStakesAndSignature The nonSignerStakesAndSignature to verify the blob against + * @param securityThreshold The custom security threshold to verify the blob against + * @param additionalQuorumNumbersRequired The additional required quorum numbers + */ + function verifyBlobV2( + BatchHeaderV2 calldata batchHeader, + BlobVerificationProofV2 calldata blobVerificationProof, + NonSignerStakesAndSignature calldata nonSignerStakesAndSignature, + SecurityThresholds memory securityThreshold, + bytes calldata additionalQuorumNumbersRequired + ) external view; + + /** + * @notice Verifies a batch of blobs for the required quorums and additional quorums and a set of custom security threshold + * @param batchHeader The batch headers of the blobs + * @param blobVerificationProof The blob verification proofs for the blobs + * @param nonSignerStakesAndSignature The nonSignerStakesAndSignatures to verify the blobs against + * @param securityThresholds The set of custom security thresholds to verify the blobs against + * @param additionalQuorumNumbersRequired The additional required quorum numbers + */ + function verifyBlobV2( + BatchHeaderV2 calldata batchHeader, + BlobVerificationProofV2 calldata blobVerificationProof, + NonSignerStakesAndSignature calldata nonSignerStakesAndSignature, + 
SecurityThresholds[] memory securityThresholds, + bytes calldata additionalQuorumNumbersRequired + ) external view; + + /** + * @notice Returns the nonSignerStakesAndSignature for a given blob and signed batch + * @param signedBatch The signed batch to get the nonSignerStakesAndSignature for + * @param blobHeader The blob header of the blob in the signed batch + */ + function getNonSignerStakesAndSignature( + SignedBatch calldata signedBatch, + BlobHeaderV2 calldata blobHeader + ) external view returns (NonSignerStakesAndSignature memory); + + /** + * @notice Verifies the security parameters for a blob + * @param blobParams The blob params to verify + * @param securityThresholds The security thresholds to verify against + */ + function verifyBlobSecurityParams( + VersionedBlobParams memory blobParams, + SecurityThresholds memory securityThresholds + ) external view; + + /** + * @notice Verifies the security parameters for a blob + * @param version The version of the blob to verify + * @param securityThresholds The security thresholds to verify against + */ + function verifyBlobSecurityParams( + uint16 version, + SecurityThresholds memory securityThresholds + ) external view; +} \ No newline at end of file diff --git a/contracts/src/interfaces/IEigenDARelayRegistry.sol b/contracts/src/interfaces/IEigenDARelayRegistry.sol new file mode 100644 index 0000000000..7450e82e68 --- /dev/null +++ b/contracts/src/interfaces/IEigenDARelayRegistry.sol @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.9; + +interface IEigenDARelayRegistry { + + event RelayAdded(address indexed relay, uint32 indexed id, string relayURL); + + function setRelayURL(address relay, uint32 id, string memory relayURL) external; + + function getRelayURL(uint32 id) external view returns (string memory); + + function getRelayId(address relay) external view returns (uint32); + + function getRelayAddress(uint32 id) external view returns (address); +} diff --git 
a/contracts/src/interfaces/IEigenDAServiceManager.sol b/contracts/src/interfaces/IEigenDAServiceManager.sol index 21221ea9be..5e02f7511e 100644 --- a/contracts/src/interfaces/IEigenDAServiceManager.sol +++ b/contracts/src/interfaces/IEigenDAServiceManager.sol @@ -1,11 +1,13 @@ -// SPDX-License-Identifier: UNLICENSED +// SPDX-License-Identifier: MIT pragma solidity ^0.8.9; import {IServiceManager} from "eigenlayer-middleware/interfaces/IServiceManager.sol"; import {BLSSignatureChecker} from "eigenlayer-middleware/BLSSignatureChecker.sol"; import {BN254} from "eigenlayer-middleware/libraries/BN254.sol"; +import {IEigenDAThresholdRegistry} from "./IEigenDAThresholdRegistry.sol"; +import "./IEigenDAStructs.sol"; -interface IEigenDAServiceManager is IServiceManager { +interface IEigenDAServiceManager is IServiceManager, IEigenDAThresholdRegistry { // EVENTS /** @@ -22,46 +24,6 @@ interface IEigenDAServiceManager is IServiceManager { */ event BatchConfirmerStatusChanged(address batchConfirmer, bool status); - // STRUCTS - - struct QuorumBlobParam { - uint8 quorumNumber; - uint8 adversaryThresholdPercentage; - uint8 confirmationThresholdPercentage; - uint32 chunkLength; // the length of the chunks in the quorum - } - - struct BlobHeader { - BN254.G1Point commitment; // the kzg commitment to the blob - uint32 dataLength; // the length of the blob in coefficients of the polynomial - QuorumBlobParam[] quorumBlobParams; // the quorumBlobParams for each quorum - } - - struct ReducedBatchHeader { - bytes32 blobHeadersRoot; - uint32 referenceBlockNumber; - } - - struct BatchHeader { - bytes32 blobHeadersRoot; - bytes quorumNumbers; // each byte is a different quorum number - bytes signedStakeForQuorums; // every bytes is an amount less than 100 specifying the percentage of stake - // that has signed in the corresponding quorum in `quorumNumbers` - uint32 referenceBlockNumber; - } - - // Relevant metadata for a given datastore - struct BatchMetadata { - BatchHeader batchHeader; 
// the header of the data store - bytes32 signatoryRecordHash; // the hash of the signatory record - uint32 confirmationBlockNumber; // the block number at which the batch was confirmed - } - - // FUNCTIONS - - /// @notice mapping between the batchId to the hash of the metadata of the corresponding Batch - function batchIdToBatchMetadataHash(uint32 batchId) external view returns(bytes32); - /** * @notice This function is used for * - submitting data availabilty certificates, @@ -73,8 +35,8 @@ interface IEigenDAServiceManager is IServiceManager { BLSSignatureChecker.NonSignerStakesAndSignature memory nonSignerStakesAndSignature ) external; - /// @notice This function is used for changing the batch confirmer - function setBatchConfirmer(address _batchConfirmer) external; + /// @notice mapping between the batchId to the hash of the metadata of the corresponding Batch + function batchIdToBatchMetadataHash(uint32 batchId) external view returns(bytes32); /// @notice Returns the current batchId function taskNumber() external view returns (uint32); @@ -84,13 +46,4 @@ interface IEigenDAServiceManager is IServiceManager { /// @notice The maximum amount of blocks in the past that the service will consider stake amounts to still be 'valid'. 
function BLOCK_STALE_MEASURE() external view returns (uint32); - - /// @notice Returns the bytes array of quorumAdversaryThresholdPercentages - function quorumAdversaryThresholdPercentages() external view returns (bytes memory); - - /// @notice Returns the bytes array of quorumAdversaryThresholdPercentages - function quorumConfirmationThresholdPercentages() external view returns (bytes memory); - - /// @notice Returns the bytes array of quorumsNumbersRequired - function quorumNumbersRequired() external view returns (bytes memory); -} +} \ No newline at end of file diff --git a/contracts/src/interfaces/IEigenDASignatureVerifier.sol b/contracts/src/interfaces/IEigenDASignatureVerifier.sol new file mode 100644 index 0000000000..6490e36248 --- /dev/null +++ b/contracts/src/interfaces/IEigenDASignatureVerifier.sol @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.9; + +import "./IEigenDAStructs.sol"; + +interface IEigenDASignatureVerifier { + function checkSignatures( + bytes32 msgHash, + bytes calldata quorumNumbers, + uint32 referenceBlockNumber, + NonSignerStakesAndSignature memory params + ) external view returns (QuorumStakeTotals memory, bytes32); +} \ No newline at end of file diff --git a/contracts/src/interfaces/IEigenDAStructs.sol b/contracts/src/interfaces/IEigenDAStructs.sol new file mode 100644 index 0000000000..445f4b3be8 --- /dev/null +++ b/contracts/src/interfaces/IEigenDAStructs.sol @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.9; + +import {BN254} from "eigenlayer-middleware/libraries/BN254.sol"; + +///////////////////////// V1 /////////////////////////////// + +struct QuorumBlobParam { + uint8 quorumNumber; + uint8 adversaryThresholdPercentage; + uint8 confirmationThresholdPercentage; + uint32 chunkLength; +} + +struct BlobHeader { + BN254.G1Point commitment; + uint32 dataLength; + QuorumBlobParam[] quorumBlobParams; +} + +struct ReducedBatchHeader { + bytes32 blobHeadersRoot; + uint32 
referenceBlockNumber; +} + +struct BatchHeader { + bytes32 blobHeadersRoot; + bytes quorumNumbers; + bytes signedStakeForQuorums; + uint32 referenceBlockNumber; +} + +struct BatchMetadata { + BatchHeader batchHeader; + bytes32 signatoryRecordHash; + uint32 confirmationBlockNumber; +} + +struct BlobVerificationProof { + uint32 batchId; + uint32 blobIndex; + BatchMetadata batchMetadata; + bytes inclusionProof; + bytes quorumIndices; +} + +///////////////////////// V2 /////////////////////////////// + +struct VersionedBlobParams { + uint32 maxNumOperators; + uint32 numChunks; + uint8 codingRate; +} + +struct SecurityThresholds { + uint8 confirmationThreshold; + uint8 adversaryThreshold; +} + +struct BlobVerificationProofV2 { + BlobCertificate blobCertificate; + uint32 blobIndex; + bytes inclusionProof; +} + +struct BlobCertificate { + BlobHeaderV2 blobHeader; + uint32 referenceBlockNumber; + uint32[] relayKeys; +} + +struct BlobHeaderV2 { + uint16 version; + bytes quorumNumbers; + BlobCommitment commitment; + bytes32 paymentHeaderHash; +} + +struct BlobCommitment { + BN254.G1Point commitment; + BN254.G2Point lengthCommitment; + BN254.G2Point lengthProof; + uint32 dataLength; +} + +struct SignedBatch { + BatchHeaderV2 batchHeader; + Attestation attestation; +} + +struct BatchHeaderV2 { + bytes32 batchRoot; + uint32 referenceBlockNumber; +} + +struct Attestation { + BN254.G1Point[] nonSignerPubkeys; + BN254.G1Point[] quorumApks; + BN254.G1Point sigma; + BN254.G2Point apkG2; + uint32[] quorumNumbers; + uint32 referenceBlockNumber; +} + +///////////////////////// SIGNATURE VERIFIER /////////////////////////////// + +struct NonSignerStakesAndSignature { + uint32[] nonSignerQuorumBitmapIndices; + BN254.G1Point[] nonSignerPubkeys; + BN254.G1Point[] quorumApks; + BN254.G2Point apkG2; + BN254.G1Point sigma; + uint32[] quorumApkIndices; + uint32[] totalStakeIndices; + uint32[][] nonSignerStakeIndices; +} + +struct QuorumStakeTotals { + uint96[] signedStakeForQuorum; + uint96[] 
totalStakeForQuorum; +} + +struct CheckSignaturesIndices { + uint32[] nonSignerQuorumBitmapIndices; + uint32[] quorumApkIndices; + uint32[] totalStakeIndices; + uint32[][] nonSignerStakeIndices; +} \ No newline at end of file diff --git a/contracts/src/interfaces/IEigenDAThresholdRegistry.sol b/contracts/src/interfaces/IEigenDAThresholdRegistry.sol new file mode 100644 index 0000000000..64ec0c60af --- /dev/null +++ b/contracts/src/interfaces/IEigenDAThresholdRegistry.sol @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.9; + +import "../interfaces/IEigenDAStructs.sol"; + +interface IEigenDAThresholdRegistry { + + /// @notice Returns the blob params for a given blob version + function getBlobParams(uint16 version) external view returns (VersionedBlobParams memory); + + /// @notice Returns an array of bytes where each byte represents the adversary threshold percentage of the quorum at that index + function quorumAdversaryThresholdPercentages() external view returns (bytes memory); + + /// @notice Returns an array of bytes where each byte represents the confirmation threshold percentage of the quorum at that index + function quorumConfirmationThresholdPercentages() external view returns (bytes memory); + + /// @notice Returns an array of bytes where each byte represents the number of a required quorum + function quorumNumbersRequired() external view returns (bytes memory); + + /// @notice Gets the adversary threshold percentage for a quorum + function getQuorumAdversaryThresholdPercentage( + uint8 quorumNumber + ) external view returns (uint8); + + /// @notice Gets the confirmation threshold percentage for a quorum + function getQuorumConfirmationThresholdPercentage( + uint8 quorumNumber + ) external view returns (uint8); + + /// @notice Checks if a quorum is required + function getIsQuorumRequired( + uint8 quorumNumber + ) external view returns (bool); + + /// @notice Gets the default security thresholds for V2 + function 
getDefaultSecurityThresholdsV2() external view returns (SecurityThresholds memory); +} \ No newline at end of file diff --git a/contracts/src/libraries/EigenDAHasher.sol b/contracts/src/libraries/EigenDAHasher.sol index afeca28683..914e9721bf 100644 --- a/contracts/src/libraries/EigenDAHasher.sol +++ b/contracts/src/libraries/EigenDAHasher.sol @@ -1,8 +1,9 @@ -// SPDX-License-Identifier: UNLICENSED +// SPDX-License-Identifier: MIT pragma solidity ^0.8.9; import {IEigenDAServiceManager} from "../interfaces/IEigenDAServiceManager.sol"; +import "../interfaces/IEigenDAStructs.sol"; /** * @title Library of functions for hashing various EigenDA structs. @@ -41,10 +42,9 @@ library EigenDAHasher { /** * @notice given the batchHeader in the provided metdata, calculates the hash of the batchMetadata * @param batchMetadata the metadata of the batch - * @return the hash of the batchMetadata */ function hashBatchMetadata( - IEigenDAServiceManager.BatchMetadata memory batchMetadata + BatchMetadata memory batchMetadata ) internal pure returns(bytes32) { return hashBatchHashedMetadata( keccak256(abi.encode(batchMetadata.batchHeader)), @@ -57,7 +57,7 @@ library EigenDAHasher { * @notice hashes the given batch header * @param batchHeader the batch header to hash */ - function hashBatchHeaderMemory(IEigenDAServiceManager.BatchHeader memory batchHeader) internal pure returns(bytes32) { + function hashBatchHeaderMemory(BatchHeader memory batchHeader) internal pure returns(bytes32) { return keccak256(abi.encode(batchHeader)); } @@ -65,7 +65,7 @@ library EigenDAHasher { * @notice hashes the given batch header * @param batchHeader the batch header to hash */ - function hashBatchHeader(IEigenDAServiceManager.BatchHeader calldata batchHeader) internal pure returns(bytes32) { + function hashBatchHeader(BatchHeader calldata batchHeader) internal pure returns(bytes32) { return keccak256(abi.encode(batchHeader)); } @@ -73,7 +73,7 @@ library EigenDAHasher { * @notice hashes the given reduced 
batch header * @param reducedBatchHeader the reduced batch header to hash */ - function hashReducedBatchHeader(IEigenDAServiceManager.ReducedBatchHeader memory reducedBatchHeader) internal pure returns(bytes32) { + function hashReducedBatchHeader(ReducedBatchHeader memory reducedBatchHeader) internal pure returns(bytes32) { return keccak256(abi.encode(reducedBatchHeader)); } @@ -81,7 +81,7 @@ library EigenDAHasher { * @notice hashes the given blob header * @param blobHeader the blob header to hash */ - function hashBlobHeader(IEigenDAServiceManager.BlobHeader memory blobHeader) internal pure returns(bytes32) { + function hashBlobHeader(BlobHeader memory blobHeader) internal pure returns(bytes32) { return keccak256(abi.encode(blobHeader)); } @@ -89,8 +89,8 @@ library EigenDAHasher { * @notice converts a batch header to a reduced batch header * @param batchHeader the batch header to convert */ - function convertBatchHeaderToReducedBatchHeader(IEigenDAServiceManager.BatchHeader memory batchHeader) internal pure returns(IEigenDAServiceManager.ReducedBatchHeader memory) { - return IEigenDAServiceManager.ReducedBatchHeader({ + function convertBatchHeaderToReducedBatchHeader(BatchHeader memory batchHeader) internal pure returns(ReducedBatchHeader memory) { + return ReducedBatchHeader({ blobHeadersRoot: batchHeader.blobHeadersRoot, referenceBlockNumber: batchHeader.referenceBlockNumber }); @@ -100,7 +100,23 @@ library EigenDAHasher { * @notice converts the given batch header to a reduced batch header and then hashes it * @param batchHeader the batch header to hash */ - function hashBatchHeaderToReducedBatchHeader(IEigenDAServiceManager.BatchHeader memory batchHeader) internal pure returns(bytes32) { + function hashBatchHeaderToReducedBatchHeader(BatchHeader memory batchHeader) internal pure returns(bytes32) { return keccak256(abi.encode(convertBatchHeaderToReducedBatchHeader(batchHeader))); } -} + + /** + * @notice hashes the given V2 batch header + * @param batchHeader 
the V2 batch header to hash + */ + function hashBatchHeaderV2(BatchHeaderV2 memory batchHeader) internal pure returns(bytes32) { + return keccak256(abi.encode(batchHeader)); + } + + /** + * @notice hashes the given V2 blob header + * @param blobHeader the V2 blob header to hash + */ + function hashBlobHeaderV2(BlobHeaderV2 memory blobHeader) internal pure returns(bytes32) { + return keccak256(abi.encode(blobHeader)); + } +} \ No newline at end of file diff --git a/contracts/src/libraries/EigenDARollupUtils.sol b/contracts/src/libraries/EigenDARollupUtils.sol index 54ff2b33d8..f659c77401 100644 --- a/contracts/src/libraries/EigenDARollupUtils.sol +++ b/contracts/src/libraries/EigenDARollupUtils.sol @@ -7,6 +7,7 @@ import {BN254} from "eigenlayer-middleware/libraries/BN254.sol"; import {EigenDAHasher} from "./EigenDAHasher.sol"; import {IEigenDAServiceManager} from "../interfaces/IEigenDAServiceManager.sol"; import {BitmapUtils} from "eigenlayer-middleware/libraries/BitmapUtils.sol"; +import "../interfaces/IEigenDAStructs.sol"; /** * @title Library of functions to be used by smart contracts wanting to prove blobs on EigenDA and open KZG commitments. 
@@ -14,15 +15,6 @@ import {BitmapUtils} from "eigenlayer-middleware/libraries/BitmapUtils.sol"; */ library EigenDARollupUtils { using BN254 for BN254.G1Point; - - // STRUCTS - struct BlobVerificationProof { - uint32 batchId; - uint32 blobIndex; - IEigenDAServiceManager.BatchMetadata batchMetadata; - bytes inclusionProof; - bytes quorumIndices; - } /** * @notice Verifies the inclusion of a blob within a batch confirmed in `eigenDAServiceManager` and its trust assumptions @@ -31,7 +23,7 @@ library EigenDARollupUtils { * @param blobVerificationProof the relevant data needed to prove inclusion of the blob and that the trust assumptions were as expected */ function verifyBlob( - IEigenDAServiceManager.BlobHeader memory blobHeader, + BlobHeader memory blobHeader, IEigenDAServiceManager eigenDAServiceManager, BlobVerificationProof memory blobVerificationProof ) internal view { @@ -104,7 +96,7 @@ library EigenDARollupUtils { * @param blobVerificationProofs the relevant data needed to prove inclusion of the blobs and that the trust assumptions were as expected */ function verifyBlobs( - IEigenDAServiceManager.BlobHeader[] memory blobHeaders, + BlobHeader[] memory blobHeaders, IEigenDAServiceManager eigenDAServiceManager, BlobVerificationProof[] memory blobVerificationProofs ) internal view { diff --git a/contracts/src/rollup/MockRollup.sol b/contracts/src/rollup/MockRollup.sol index e0ac664155..fb8f24d586 100644 --- a/contracts/src/rollup/MockRollup.sol +++ b/contracts/src/rollup/MockRollup.sol @@ -5,6 +5,7 @@ import {EigenDARollupUtils} from "../libraries/EigenDARollupUtils.sol"; import {EigenDAServiceManager} from "../core/EigenDAServiceManager.sol"; import {IEigenDAServiceManager} from "../interfaces/IEigenDAServiceManager.sol"; import {BN254} from "eigenlayer-middleware/libraries/BN254.sol"; +import "../interfaces/IEigenDAStructs.sol"; struct Commitment { address confirmer; // confirmer who posted the commitment @@ -31,8 +32,8 @@ contract MockRollup { * @param 
blobVerificationProof the blob verification proof */ function postCommitment( - IEigenDAServiceManager.BlobHeader memory blobHeader, - EigenDARollupUtils.BlobVerificationProof memory blobVerificationProof + BlobHeader memory blobHeader, + BlobVerificationProof memory blobVerificationProof ) external { // require commitment has not already been posted require(commitments[block.timestamp].confirmer == address(0), "MockRollup.postCommitment: Commitment already posted"); diff --git a/contracts/test/harnesses/EigenDABlobUtilsHarness.sol b/contracts/test/harnesses/EigenDABlobUtilsHarness.sol index 1a3ceb17c3..c08914ab7d 100644 --- a/contracts/test/harnesses/EigenDABlobUtilsHarness.sol +++ b/contracts/test/harnesses/EigenDABlobUtilsHarness.sol @@ -4,21 +4,22 @@ pragma solidity ^0.8.9; import "../../src/libraries/EigenDARollupUtils.sol"; import "forge-std/Test.sol"; +import "../../src/interfaces/IEigenDAStructs.sol"; contract EigenDABlobUtilsHarness is Test { function verifyBlob( - IEigenDAServiceManager.BlobHeader calldata blobHeader, + BlobHeader calldata blobHeader, IEigenDAServiceManager eigenDAServiceManager, - EigenDARollupUtils.BlobVerificationProof calldata blobVerificationProof + BlobVerificationProof calldata blobVerificationProof ) external view { EigenDARollupUtils.verifyBlob(blobHeader, eigenDAServiceManager, blobVerificationProof); } function verifyBlobs( - IEigenDAServiceManager.BlobHeader[] calldata blobHeaders, + BlobHeader[] calldata blobHeaders, IEigenDAServiceManager eigenDAServiceManager, - EigenDARollupUtils.BlobVerificationProof[] calldata blobVerificationProofs + BlobVerificationProof[] calldata blobVerificationProofs ) external view { EigenDARollupUtils.verifyBlobs(blobHeaders, eigenDAServiceManager, blobVerificationProofs); } diff --git a/contracts/test/unit/EigenDABlobUtils.t.sol b/contracts/test/unit/EigenDABlobUtils.t.sol index aa7afed0fd..292a2ac347 100644 --- a/contracts/test/unit/EigenDABlobUtils.t.sol +++ 
b/contracts/test/unit/EigenDABlobUtils.t.sol @@ -11,17 +11,19 @@ import {EigenDAHasher} from "../../src/libraries/EigenDAHasher.sol"; import {EigenDABlobUtilsHarness} from "../harnesses/EigenDABlobUtilsHarness.sol"; import {EigenDAServiceManager} from "../../src/core/EigenDAServiceManager.sol"; import {IEigenDAServiceManager} from "../../src/interfaces/IEigenDAServiceManager.sol"; - +import {IEigenDAThresholdRegistry} from "../../src/interfaces/IEigenDAThresholdRegistry.sol"; +import {IEigenDARelayRegistry} from "../../src/interfaces/IEigenDARelayRegistry.sol"; +import "../../src/interfaces/IEigenDAStructs.sol"; import "forge-std/StdStorage.sol"; contract EigenDABlobUtilsUnit is BLSMockAVSDeployer { using stdStorage for StdStorage; using BN254 for BN254.G1Point; - using EigenDAHasher for IEigenDAServiceManager.BatchHeader; - using EigenDAHasher for IEigenDAServiceManager.ReducedBatchHeader; - using EigenDAHasher for IEigenDAServiceManager.BlobHeader; - using EigenDAHasher for IEigenDAServiceManager.BatchMetadata; + using EigenDAHasher for BatchHeader; + using EigenDAHasher for ReducedBatchHeader; + using EigenDAHasher for BlobHeader; + using EigenDAHasher for BatchMetadata; address confirmer = address(uint160(uint256(keccak256(abi.encodePacked("confirmer"))))); address notConfirmer = address(uint160(uint256(keccak256(abi.encodePacked("notConfirmer"))))); @@ -46,7 +48,9 @@ contract EigenDABlobUtilsUnit is BLSMockAVSDeployer { avsDirectory, rewardsCoordinator, registryCoordinator, - stakeRegistry + stakeRegistry, + IEigenDAThresholdRegistry(address(0)), + IEigenDARelayRegistry(address(0)) ); address[] memory confirmers = new address[](1); @@ -75,12 +79,12 @@ contract EigenDABlobUtilsUnit is BLSMockAVSDeployer { function testVerifyBlob_TwoQuorums(uint256 pseudoRandomNumber) public { uint256 numQuorumBlobParams = 2; - IEigenDAServiceManager.BlobHeader[] memory blobHeader = new IEigenDAServiceManager.BlobHeader[](2); + BlobHeader[] memory blobHeader = new 
BlobHeader[](2); blobHeader[0] = _generateRandomBlobHeader(pseudoRandomNumber, numQuorumBlobParams); uint256 anotherPseudoRandomNumber = uint256(keccak256(abi.encodePacked(pseudoRandomNumber))); blobHeader[1] = _generateRandomBlobHeader(anotherPseudoRandomNumber, numQuorumBlobParams); - IEigenDAServiceManager.BatchHeader memory batchHeader; + BatchHeader memory batchHeader; bytes memory firstBlobHash = abi.encodePacked(blobHeader[0].hashBlobHeader()); bytes memory secondBlobHash = abi.encodePacked(blobHeader[1].hashBlobHeader()); batchHeader.blobHeadersRoot = keccak256(abi.encodePacked(keccak256(firstBlobHash), keccak256(secondBlobHash))); @@ -92,7 +96,7 @@ contract EigenDABlobUtilsUnit is BLSMockAVSDeployer { batchHeader.referenceBlockNumber = uint32(block.number); // add dummy batch metadata - IEigenDAServiceManager.BatchMetadata memory batchMetadata; + BatchMetadata memory batchMetadata; batchMetadata.batchHeader = batchHeader; batchMetadata.signatoryRecordHash = keccak256(abi.encodePacked("signatoryRecordHash")); batchMetadata.confirmationBlockNumber = defaultConfirmationBlockNumber; @@ -103,7 +107,7 @@ contract EigenDABlobUtilsUnit is BLSMockAVSDeployer { .with_key(defaultBatchId) .checked_write(batchMetadata.hashBatchMetadata()); - EigenDARollupUtils.BlobVerificationProof memory blobVerificationProof; + BlobVerificationProof memory blobVerificationProof; blobVerificationProof.batchId = defaultBatchId; blobVerificationProof.batchMetadata = batchMetadata; blobVerificationProof.inclusionProof = abi.encodePacked(keccak256(firstBlobHash)); @@ -121,12 +125,12 @@ contract EigenDABlobUtilsUnit is BLSMockAVSDeployer { function testVerifyBlobs_TwoBlobs(uint256 pseudoRandomNumber) public { uint256 numQuorumBlobParams = 2; - IEigenDAServiceManager.BlobHeader[] memory blobHeader = new IEigenDAServiceManager.BlobHeader[](2); + BlobHeader[] memory blobHeader = new BlobHeader[](2); blobHeader[0] = _generateRandomBlobHeader(pseudoRandomNumber, numQuorumBlobParams); uint256 
anotherPseudoRandomNumber = uint256(keccak256(abi.encodePacked(pseudoRandomNumber))); blobHeader[1] = _generateRandomBlobHeader(anotherPseudoRandomNumber, numQuorumBlobParams); - IEigenDAServiceManager.BatchHeader memory batchHeader; + BatchHeader memory batchHeader; bytes memory firstBlobHash = abi.encodePacked(blobHeader[0].hashBlobHeader()); bytes memory secondBlobHash = abi.encodePacked(blobHeader[1].hashBlobHeader()); batchHeader.blobHeadersRoot = keccak256(abi.encodePacked(keccak256(firstBlobHash), keccak256(secondBlobHash))); @@ -138,7 +142,7 @@ contract EigenDABlobUtilsUnit is BLSMockAVSDeployer { batchHeader.referenceBlockNumber = uint32(block.number); // add dummy batch metadata - IEigenDAServiceManager.BatchMetadata memory batchMetadata; + BatchMetadata memory batchMetadata; batchMetadata.batchHeader = batchHeader; batchMetadata.signatoryRecordHash = keccak256(abi.encodePacked("signatoryRecordHash")); batchMetadata.confirmationBlockNumber = defaultConfirmationBlockNumber; @@ -149,7 +153,7 @@ contract EigenDABlobUtilsUnit is BLSMockAVSDeployer { .with_key(defaultBatchId) .checked_write(batchMetadata.hashBatchMetadata()); - EigenDARollupUtils.BlobVerificationProof[] memory blobVerificationProofs = new EigenDARollupUtils.BlobVerificationProof[](2); + BlobVerificationProof[] memory blobVerificationProofs = new BlobVerificationProof[](2); blobVerificationProofs[0].batchId = defaultBatchId; blobVerificationProofs[1].batchId = defaultBatchId; blobVerificationProofs[0].batchMetadata = batchMetadata; @@ -173,12 +177,12 @@ contract EigenDABlobUtilsUnit is BLSMockAVSDeployer { function testVerifyBlob_InvalidMetadataHash(uint256 pseudoRandomNumber) public { uint256 numQuorumBlobParams = pseudoRandomNumber % 192; - IEigenDAServiceManager.BlobHeader[] memory blobHeader = new IEigenDAServiceManager.BlobHeader[](2); + BlobHeader[] memory blobHeader = new BlobHeader[](2); blobHeader[0] = _generateRandomBlobHeader(pseudoRandomNumber, numQuorumBlobParams); uint256 
anotherPseudoRandomNumber = uint256(keccak256(abi.encodePacked(pseudoRandomNumber))); blobHeader[1] = _generateRandomBlobHeader(anotherPseudoRandomNumber, numQuorumBlobParams); - EigenDARollupUtils.BlobVerificationProof memory blobVerificationProof; + BlobVerificationProof memory blobVerificationProof; blobVerificationProof.batchId = defaultBatchId; cheats.expectRevert("EigenDARollupUtils.verifyBlob: batchMetadata does not match stored metadata"); @@ -187,13 +191,13 @@ contract EigenDABlobUtilsUnit is BLSMockAVSDeployer { function testVerifyBlob_InvalidMerkleProof(uint256 pseudoRandomNumber) public { uint256 numQuorumBlobParams = pseudoRandomNumber % 192; - IEigenDAServiceManager.BlobHeader[] memory blobHeader = new IEigenDAServiceManager.BlobHeader[](2); + BlobHeader[] memory blobHeader = new BlobHeader[](2); blobHeader[0] = _generateRandomBlobHeader(pseudoRandomNumber, numQuorumBlobParams); uint256 anotherPseudoRandomNumber = uint256(keccak256(abi.encodePacked(pseudoRandomNumber))); blobHeader[1] = _generateRandomBlobHeader(anotherPseudoRandomNumber, numQuorumBlobParams); // add dummy batch metadata - IEigenDAServiceManager.BatchMetadata memory batchMetadata; + BatchMetadata memory batchMetadata; stdstore .target(address(eigenDAServiceManager)) @@ -201,7 +205,7 @@ contract EigenDABlobUtilsUnit is BLSMockAVSDeployer { .with_key(defaultBatchId) .checked_write(batchMetadata.hashBatchMetadata()); - EigenDARollupUtils.BlobVerificationProof memory blobVerificationProof; + BlobVerificationProof memory blobVerificationProof; blobVerificationProof.batchId = defaultBatchId; blobVerificationProof.batchMetadata = batchMetadata; blobVerificationProof.inclusionProof = abi.encodePacked(bytes32(0)); @@ -213,12 +217,12 @@ contract EigenDABlobUtilsUnit is BLSMockAVSDeployer { function testVerifyBlob_RandomNumberOfQuorums(uint256 pseudoRandomNumber) public { uint256 numQuorumBlobParams = 2 + (pseudoRandomNumber % 192); - IEigenDAServiceManager.BlobHeader[] memory blobHeader = new 
IEigenDAServiceManager.BlobHeader[](2); + BlobHeader[] memory blobHeader = new BlobHeader[](2); blobHeader[0] = _generateRandomBlobHeader(pseudoRandomNumber, numQuorumBlobParams); uint256 anotherPseudoRandomNumber = uint256(keccak256(abi.encodePacked(pseudoRandomNumber))); blobHeader[1] = _generateRandomBlobHeader(anotherPseudoRandomNumber, numQuorumBlobParams); - IEigenDAServiceManager.BatchHeader memory batchHeader; + BatchHeader memory batchHeader; bytes memory firstBlobHash = abi.encodePacked(blobHeader[0].hashBlobHeader()); bytes memory secondBlobHash = abi.encodePacked(blobHeader[1].hashBlobHeader()); batchHeader.blobHeadersRoot = keccak256(abi.encodePacked(keccak256(firstBlobHash), keccak256(secondBlobHash))); @@ -230,7 +234,7 @@ contract EigenDABlobUtilsUnit is BLSMockAVSDeployer { batchHeader.referenceBlockNumber = uint32(block.number); // add dummy batch metadata - IEigenDAServiceManager.BatchMetadata memory batchMetadata; + BatchMetadata memory batchMetadata; batchMetadata.batchHeader = batchHeader; batchMetadata.signatoryRecordHash = keccak256(abi.encodePacked("signatoryRecordHash")); batchMetadata.confirmationBlockNumber = defaultConfirmationBlockNumber; @@ -241,7 +245,7 @@ contract EigenDABlobUtilsUnit is BLSMockAVSDeployer { .with_key(defaultBatchId) .checked_write(batchMetadata.hashBatchMetadata()); - EigenDARollupUtils.BlobVerificationProof memory blobVerificationProof; + BlobVerificationProof memory blobVerificationProof; blobVerificationProof.batchId = defaultBatchId; blobVerificationProof.batchMetadata = batchMetadata; blobVerificationProof.inclusionProof = abi.encodePacked(keccak256(firstBlobHash)); @@ -257,14 +261,14 @@ contract EigenDABlobUtilsUnit is BLSMockAVSDeployer { emit log_named_uint("gas used", gasBefore - gasAfter); } - function xtestVerifyBlob_RequiredQuorumsNotMet(uint256 pseudoRandomNumber) public { + function testVerifyBlob_RequiredQuorumsNotMet(uint256 pseudoRandomNumber) public { uint256 numQuorumBlobParams = 1; - 
IEigenDAServiceManager.BlobHeader[] memory blobHeader = new IEigenDAServiceManager.BlobHeader[](2); + BlobHeader[] memory blobHeader = new BlobHeader[](2); blobHeader[0] = _generateRandomBlobHeader(pseudoRandomNumber, numQuorumBlobParams); uint256 anotherPseudoRandomNumber = uint256(keccak256(abi.encodePacked(pseudoRandomNumber))); blobHeader[1] = _generateRandomBlobHeader(anotherPseudoRandomNumber, numQuorumBlobParams); - IEigenDAServiceManager.BatchHeader memory batchHeader; + BatchHeader memory batchHeader; bytes memory firstBlobHash = abi.encodePacked(blobHeader[0].hashBlobHeader()); bytes memory secondBlobHash = abi.encodePacked(blobHeader[1].hashBlobHeader()); batchHeader.blobHeadersRoot = keccak256(abi.encodePacked(keccak256(firstBlobHash), keccak256(secondBlobHash))); @@ -276,7 +280,7 @@ contract EigenDABlobUtilsUnit is BLSMockAVSDeployer { batchHeader.referenceBlockNumber = uint32(block.number); // add dummy batch metadata - IEigenDAServiceManager.BatchMetadata memory batchMetadata; + BatchMetadata memory batchMetadata; batchMetadata.batchHeader = batchHeader; batchMetadata.signatoryRecordHash = keccak256(abi.encodePacked("signatoryRecordHash")); batchMetadata.confirmationBlockNumber = defaultConfirmationBlockNumber; @@ -287,7 +291,7 @@ contract EigenDABlobUtilsUnit is BLSMockAVSDeployer { .with_key(defaultBatchId) .checked_write(batchMetadata.hashBatchMetadata()); - EigenDARollupUtils.BlobVerificationProof memory blobVerificationProof; + BlobVerificationProof memory blobVerificationProof; blobVerificationProof.batchId = defaultBatchId; blobVerificationProof.batchMetadata = batchMetadata; blobVerificationProof.inclusionProof = abi.encodePacked(keccak256(firstBlobHash)); @@ -301,9 +305,9 @@ contract EigenDABlobUtilsUnit is BLSMockAVSDeployer { eigenDABlobUtilsHarness.verifyBlob(blobHeader[1], eigenDAServiceManager, blobVerificationProof); } - function xtestVerifyBlob_AdversayThresholdNotMet(uint256 pseudoRandomNumber) public { + function 
testVerifyBlob_AdversayThresholdNotMet(uint256 pseudoRandomNumber) public { uint256 numQuorumBlobParams = 2; - IEigenDAServiceManager.BlobHeader[] memory blobHeader = new IEigenDAServiceManager.BlobHeader[](2); + BlobHeader[] memory blobHeader = new BlobHeader[](2); blobHeader[0] = _generateRandomBlobHeader(pseudoRandomNumber, numQuorumBlobParams); uint256 anotherPseudoRandomNumber = uint256(keccak256(abi.encodePacked(pseudoRandomNumber))); blobHeader[1] = _generateRandomBlobHeader(anotherPseudoRandomNumber, numQuorumBlobParams); @@ -313,7 +317,7 @@ contract EigenDABlobUtilsUnit is BLSMockAVSDeployer { blobHeader[1].quorumBlobParams[i].adversaryThresholdPercentage = EigenDARollupUtils.getQuorumAdversaryThreshold(eigenDAServiceManager, blobHeader[1].quorumBlobParams[i].quorumNumber) - 1; } - IEigenDAServiceManager.BatchHeader memory batchHeader; + BatchHeader memory batchHeader; bytes memory firstBlobHash = abi.encodePacked(blobHeader[0].hashBlobHeader()); bytes memory secondBlobHash = abi.encodePacked(blobHeader[1].hashBlobHeader()); batchHeader.blobHeadersRoot = keccak256(abi.encodePacked(keccak256(firstBlobHash), keccak256(secondBlobHash))); @@ -325,7 +329,7 @@ contract EigenDABlobUtilsUnit is BLSMockAVSDeployer { batchHeader.referenceBlockNumber = uint32(block.number); // add dummy batch metadata - IEigenDAServiceManager.BatchMetadata memory batchMetadata; + BatchMetadata memory batchMetadata; batchMetadata.batchHeader = batchHeader; batchMetadata.signatoryRecordHash = keccak256(abi.encodePacked("signatoryRecordHash")); batchMetadata.confirmationBlockNumber = defaultConfirmationBlockNumber; @@ -336,7 +340,7 @@ contract EigenDABlobUtilsUnit is BLSMockAVSDeployer { .with_key(defaultBatchId) .checked_write(batchMetadata.hashBatchMetadata()); - EigenDARollupUtils.BlobVerificationProof memory blobVerificationProof; + BlobVerificationProof memory blobVerificationProof; blobVerificationProof.batchId = defaultBatchId; blobVerificationProof.batchMetadata = batchMetadata; 
blobVerificationProof.inclusionProof = abi.encodePacked(keccak256(firstBlobHash)); @@ -352,12 +356,12 @@ contract EigenDABlobUtilsUnit is BLSMockAVSDeployer { function testVerifyBlob_QuorumNumberMismatch(uint256 pseudoRandomNumber) public { uint256 numQuorumBlobParams = 2; - IEigenDAServiceManager.BlobHeader[] memory blobHeader = new IEigenDAServiceManager.BlobHeader[](2); + BlobHeader[] memory blobHeader = new BlobHeader[](2); blobHeader[0] = _generateRandomBlobHeader(pseudoRandomNumber, numQuorumBlobParams); uint256 anotherPseudoRandomNumber = uint256(keccak256(abi.encodePacked(pseudoRandomNumber))); blobHeader[1] = _generateRandomBlobHeader(anotherPseudoRandomNumber, numQuorumBlobParams); - IEigenDAServiceManager.BatchHeader memory batchHeader; + BatchHeader memory batchHeader; bytes memory firstBlobHash = abi.encodePacked(blobHeader[0].hashBlobHeader()); bytes memory secondBlobHash = abi.encodePacked(blobHeader[1].hashBlobHeader()); batchHeader.blobHeadersRoot = keccak256(abi.encodePacked(keccak256(firstBlobHash), keccak256(secondBlobHash))); @@ -369,7 +373,7 @@ contract EigenDABlobUtilsUnit is BLSMockAVSDeployer { batchHeader.referenceBlockNumber = uint32(block.number); // add dummy batch metadata - IEigenDAServiceManager.BatchMetadata memory batchMetadata; + BatchMetadata memory batchMetadata; batchMetadata.batchHeader = batchHeader; batchMetadata.signatoryRecordHash = keccak256(abi.encodePacked("signatoryRecordHash")); batchMetadata.confirmationBlockNumber = defaultConfirmationBlockNumber; @@ -380,7 +384,7 @@ contract EigenDABlobUtilsUnit is BLSMockAVSDeployer { .with_key(defaultBatchId) .checked_write(batchMetadata.hashBatchMetadata()); - EigenDARollupUtils.BlobVerificationProof memory blobVerificationProof; + BlobVerificationProof memory blobVerificationProof; blobVerificationProof.batchId = defaultBatchId; blobVerificationProof.batchMetadata = batchMetadata; blobVerificationProof.inclusionProof = abi.encodePacked(keccak256(firstBlobHash)); @@ -397,12 
+401,12 @@ contract EigenDABlobUtilsUnit is BLSMockAVSDeployer { function testVerifyBlob_QuorumThresholdNotMet(uint256 pseudoRandomNumber) public { uint256 numQuorumBlobParams = 2; - IEigenDAServiceManager.BlobHeader[] memory blobHeader = new IEigenDAServiceManager.BlobHeader[](2); + BlobHeader[] memory blobHeader = new BlobHeader[](2); blobHeader[0] = _generateRandomBlobHeader(pseudoRandomNumber, numQuorumBlobParams); uint256 anotherPseudoRandomNumber = uint256(keccak256(abi.encodePacked(pseudoRandomNumber))); blobHeader[1] = _generateRandomBlobHeader(anotherPseudoRandomNumber, numQuorumBlobParams); - IEigenDAServiceManager.BatchHeader memory batchHeader; + BatchHeader memory batchHeader; bytes memory firstBlobHash = abi.encodePacked(blobHeader[0].hashBlobHeader()); bytes memory secondBlobHash = abi.encodePacked(blobHeader[1].hashBlobHeader()); batchHeader.blobHeadersRoot = keccak256(abi.encodePacked(keccak256(firstBlobHash), keccak256(secondBlobHash))); @@ -414,7 +418,7 @@ contract EigenDABlobUtilsUnit is BLSMockAVSDeployer { batchHeader.referenceBlockNumber = uint32(block.number); // add dummy batch metadata - IEigenDAServiceManager.BatchMetadata memory batchMetadata; + BatchMetadata memory batchMetadata; batchMetadata.batchHeader = batchHeader; batchMetadata.signatoryRecordHash = keccak256(abi.encodePacked("signatoryRecordHash")); batchMetadata.confirmationBlockNumber = defaultConfirmationBlockNumber; @@ -425,7 +429,7 @@ contract EigenDABlobUtilsUnit is BLSMockAVSDeployer { .with_key(defaultBatchId) .checked_write(batchMetadata.hashBatchMetadata()); - EigenDARollupUtils.BlobVerificationProof memory blobVerificationProof; + BlobVerificationProof memory blobVerificationProof; blobVerificationProof.batchId = defaultBatchId; blobVerificationProof.batchMetadata = batchMetadata; blobVerificationProof.inclusionProof = abi.encodePacked(keccak256(firstBlobHash)); @@ -441,18 +445,18 @@ contract EigenDABlobUtilsUnit is BLSMockAVSDeployer { } // generates a random blob 
header with the given coding ratio percentage as the ratio of original data to encoded data - function _generateRandomBlobHeader(uint256 pseudoRandomNumber, uint256 numQuorumsBlobParams) internal returns (IEigenDAServiceManager.BlobHeader memory) { + function _generateRandomBlobHeader(uint256 pseudoRandomNumber, uint256 numQuorumsBlobParams) internal returns (BlobHeader memory) { if(pseudoRandomNumber == 0) { pseudoRandomNumber = 1; } - IEigenDAServiceManager.BlobHeader memory blobHeader; + BlobHeader memory blobHeader; blobHeader.commitment.X = uint256(keccak256(abi.encodePacked(pseudoRandomNumber, "blobHeader.commitment.X"))) % BN254.FP_MODULUS; blobHeader.commitment.Y = uint256(keccak256(abi.encodePacked(pseudoRandomNumber, "blobHeader.commitment.Y"))) % BN254.FP_MODULUS; blobHeader.dataLength = uint32(uint256(keccak256(abi.encodePacked(pseudoRandomNumber, "blobHeader.dataLength")))); - blobHeader.quorumBlobParams = new IEigenDAServiceManager.QuorumBlobParam[](numQuorumsBlobParams); + blobHeader.quorumBlobParams = new QuorumBlobParam[](numQuorumsBlobParams); blobHeader.dataLength = uint32(uint256(keccak256(abi.encodePacked(pseudoRandomNumber, "blobHeader.dataLength")))); for (uint i = 0; i < numQuorumsBlobParams; i++) { if(i < 2){ diff --git a/contracts/test/unit/EigenDAServiceManagerUnit.t.sol b/contracts/test/unit/EigenDAServiceManagerUnit.t.sol index c3d6218478..cf94677511 100644 --- a/contracts/test/unit/EigenDAServiceManagerUnit.t.sol +++ b/contracts/test/unit/EigenDAServiceManagerUnit.t.sol @@ -8,11 +8,14 @@ import {EigenDAServiceManager, IRewardsCoordinator} from "../../src/core/EigenDA import {EigenDAHasher} from "../../src/libraries/EigenDAHasher.sol"; import {EigenDAServiceManager} from "../../src/core/EigenDAServiceManager.sol"; import {IEigenDAServiceManager} from "../../src/interfaces/IEigenDAServiceManager.sol"; +import "../../src/interfaces/IEigenDAStructs.sol"; +import {IEigenDAThresholdRegistry} from 
"../../src/interfaces/IEigenDAThresholdRegistry.sol"; +import {IEigenDARelayRegistry} from "../../src/interfaces/IEigenDARelayRegistry.sol"; contract EigenDAServiceManagerUnit is BLSMockAVSDeployer { using BN254 for BN254.G1Point; - using EigenDAHasher for IEigenDAServiceManager.BatchHeader; - using EigenDAHasher for IEigenDAServiceManager.ReducedBatchHeader; + using EigenDAHasher for BatchHeader; + using EigenDAHasher for ReducedBatchHeader; address confirmer = address(uint160(uint256(keccak256(abi.encodePacked("confirmer"))))); address notConfirmer = address(uint160(uint256(keccak256(abi.encodePacked("notConfirmer"))))); @@ -35,7 +38,9 @@ contract EigenDAServiceManagerUnit is BLSMockAVSDeployer { avsDirectory, rewardsCoordinator, registryCoordinator, - stakeRegistry + stakeRegistry, + IEigenDAThresholdRegistry(address(0)), + IEigenDARelayRegistry(address(0)) ); address[] memory confirmers = new address[](1); @@ -61,7 +66,7 @@ contract EigenDAServiceManagerUnit is BLSMockAVSDeployer { } function testConfirmBatch_AllSigning_Valid(uint256 pseudoRandomNumber) public { - (IEigenDAServiceManager.BatchHeader memory batchHeader, BLSSignatureChecker.NonSignerStakesAndSignature memory nonSignerStakesAndSignature) + (BatchHeader memory batchHeader, BLSSignatureChecker.NonSignerStakesAndSignature memory nonSignerStakesAndSignature) = _getHeaderandNonSigners(0, pseudoRandomNumber, 100); uint32 batchIdToConfirm = eigenDAServiceManager.batchId(); @@ -82,7 +87,7 @@ contract EigenDAServiceManagerUnit is BLSMockAVSDeployer { } function testConfirmBatch_Revert_NotEOA(uint256 pseudoRandomNumber) public { - (IEigenDAServiceManager.BatchHeader memory batchHeader, BLSSignatureChecker.NonSignerStakesAndSignature memory nonSignerStakesAndSignature) + (BatchHeader memory batchHeader, BLSSignatureChecker.NonSignerStakesAndSignature memory nonSignerStakesAndSignature) = _getHeaderandNonSigners(0, pseudoRandomNumber, 100); cheats.expectRevert(bytes("EigenDAServiceManager.confirmBatch: header 
and nonsigner data must be in calldata")); @@ -94,7 +99,7 @@ contract EigenDAServiceManagerUnit is BLSMockAVSDeployer { } function testConfirmBatch_Revert_NotConfirmer(uint256 pseudoRandomNumber) public { - (IEigenDAServiceManager.BatchHeader memory batchHeader, BLSSignatureChecker.NonSignerStakesAndSignature memory nonSignerStakesAndSignature) + (BatchHeader memory batchHeader, BLSSignatureChecker.NonSignerStakesAndSignature memory nonSignerStakesAndSignature) = _getHeaderandNonSigners(0, pseudoRandomNumber, 100); cheats.expectRevert(bytes("onlyBatchConfirmer: not from batch confirmer")); @@ -113,7 +118,7 @@ contract EigenDAServiceManagerUnit is BLSMockAVSDeployer { (, BLSSignatureChecker.NonSignerStakesAndSignature memory nonSignerStakesAndSignature) = _registerSignatoriesAndGetNonSignerStakeAndSignatureRandom(pseudoRandomNumber, 0, quorumBitmap); - IEigenDAServiceManager.BatchHeader memory batchHeader = + BatchHeader memory batchHeader = _getRandomBatchHeader(pseudoRandomNumber, quorumNumbers, uint32(block.number + 1), 100); bytes32 batchHeaderHash = batchHeader.hashBatchHeaderMemory(); @@ -128,7 +133,7 @@ contract EigenDAServiceManagerUnit is BLSMockAVSDeployer { } function testConfirmBatch_Revert_PastBlocknumber(uint256 pseudoRandomNumber) public { - (IEigenDAServiceManager.BatchHeader memory batchHeader, BLSSignatureChecker.NonSignerStakesAndSignature memory nonSignerStakesAndSignature) + (BatchHeader memory batchHeader, BLSSignatureChecker.NonSignerStakesAndSignature memory nonSignerStakesAndSignature) = _getHeaderandNonSigners(0, pseudoRandomNumber, 100); cheats.roll(block.number + eigenDAServiceManager.BLOCK_STALE_MEASURE()); @@ -141,7 +146,7 @@ contract EigenDAServiceManagerUnit is BLSMockAVSDeployer { } function testConfirmBatch_Revert_Threshold(uint256 pseudoRandomNumber) public { - (IEigenDAServiceManager.BatchHeader memory batchHeader, BLSSignatureChecker.NonSignerStakesAndSignature memory nonSignerStakesAndSignature) + (BatchHeader memory 
batchHeader, BLSSignatureChecker.NonSignerStakesAndSignature memory nonSignerStakesAndSignature) = _getHeaderandNonSigners(1, pseudoRandomNumber, 100); cheats.expectRevert(bytes("EigenDAServiceManager.confirmBatch: signatories do not own at least threshold percentage of a quorum")); @@ -153,7 +158,7 @@ contract EigenDAServiceManagerUnit is BLSMockAVSDeployer { } function testConfirmBatch_NonSigner_Valid(uint256 pseudoRandomNumber) public { - (IEigenDAServiceManager.BatchHeader memory batchHeader, BLSSignatureChecker.NonSignerStakesAndSignature memory nonSignerStakesAndSignature) + (BatchHeader memory batchHeader, BLSSignatureChecker.NonSignerStakesAndSignature memory nonSignerStakesAndSignature) = _getHeaderandNonSigners(1, pseudoRandomNumber, 75); uint32 batchIdToConfirm = eigenDAServiceManager.batchId(); @@ -174,7 +179,7 @@ contract EigenDAServiceManagerUnit is BLSMockAVSDeployer { } function testConfirmBatch_Revert_LengthMismatch(uint256 pseudoRandomNumber) public { - (IEigenDAServiceManager.BatchHeader memory batchHeader, BLSSignatureChecker.NonSignerStakesAndSignature memory nonSignerStakesAndSignature) + (BatchHeader memory batchHeader, BLSSignatureChecker.NonSignerStakesAndSignature memory nonSignerStakesAndSignature) = _getHeaderandNonSigners(0, pseudoRandomNumber, 100); batchHeader.signedStakeForQuorums = new bytes(0); @@ -186,7 +191,7 @@ contract EigenDAServiceManagerUnit is BLSMockAVSDeployer { ); } - function _getHeaderandNonSigners(uint256 _nonSigners, uint256 _pseudoRandomNumber, uint8 _threshold) internal returns (IEigenDAServiceManager.BatchHeader memory, BLSSignatureChecker.NonSignerStakesAndSignature memory) { + function _getHeaderandNonSigners(uint256 _nonSigners, uint256 _pseudoRandomNumber, uint8 _threshold) internal returns (BatchHeader memory, BLSSignatureChecker.NonSignerStakesAndSignature memory) { // register a bunch of operators uint256 quorumBitmap = 1; bytes memory quorumNumbers = BitmapUtils.bitmapToBytesArray(quorumBitmap); @@ -196,7 
+201,7 @@ contract EigenDAServiceManagerUnit is BLSMockAVSDeployer { _registerSignatoriesAndGetNonSignerStakeAndSignatureRandom(_pseudoRandomNumber, _nonSigners, quorumBitmap); // get a random batch header - IEigenDAServiceManager.BatchHeader memory batchHeader = _getRandomBatchHeader(_pseudoRandomNumber, quorumNumbers, referenceBlockNumber, _threshold); + BatchHeader memory batchHeader = _getRandomBatchHeader(_pseudoRandomNumber, quorumNumbers, referenceBlockNumber, _threshold); // set batch specific signature bytes32 reducedBatchHeaderHash = batchHeader.hashBatchHeaderToReducedBatchHeader(); @@ -205,8 +210,8 @@ contract EigenDAServiceManagerUnit is BLSMockAVSDeployer { return (batchHeader, nonSignerStakesAndSignature); } - function _getRandomBatchHeader(uint256 pseudoRandomNumber, bytes memory quorumNumbers, uint32 referenceBlockNumber, uint8 threshold) internal pure returns(IEigenDAServiceManager.BatchHeader memory) { - IEigenDAServiceManager.BatchHeader memory batchHeader; + function _getRandomBatchHeader(uint256 pseudoRandomNumber, bytes memory quorumNumbers, uint32 referenceBlockNumber, uint8 threshold) internal pure returns(BatchHeader memory) { + BatchHeader memory batchHeader; batchHeader.blobHeadersRoot = keccak256(abi.encodePacked("blobHeadersRoot", pseudoRandomNumber)); batchHeader.quorumNumbers = quorumNumbers; batchHeader.signedStakeForQuorums = new bytes(quorumNumbers.length); diff --git a/contracts/test/unit/MockRollup.t.sol b/contracts/test/unit/MockRollup.t.sol index bde2d55cad..0150c56a88 100644 --- a/contracts/test/unit/MockRollup.t.sol +++ b/contracts/test/unit/MockRollup.t.sol @@ -11,16 +11,18 @@ import {EigenDAServiceManager, IRewardsCoordinator} from "../../src/core/EigenDA import {IEigenDAServiceManager} from "../../src/interfaces/IEigenDAServiceManager.sol"; import {EigenDARollupUtils} from "../../src/libraries/EigenDARollupUtils.sol"; import {BN254} from "eigenlayer-middleware/libraries/BN254.sol"; - +import 
"../../src/interfaces/IEigenDAStructs.sol"; +import {IEigenDAThresholdRegistry} from "../../src/interfaces/IEigenDAThresholdRegistry.sol"; +import {IEigenDARelayRegistry} from "../../src/interfaces/IEigenDARelayRegistry.sol"; import "forge-std/StdStorage.sol"; contract MockRollupTest is BLSMockAVSDeployer { using stdStorage for StdStorage; using BN254 for BN254.G1Point; - using EigenDAHasher for IEigenDAServiceManager.BatchHeader; - using EigenDAHasher for IEigenDAServiceManager.ReducedBatchHeader; - using EigenDAHasher for IEigenDAServiceManager.BlobHeader; - using EigenDAHasher for IEigenDAServiceManager.BatchMetadata; + using EigenDAHasher for BatchHeader; + using EigenDAHasher for ReducedBatchHeader; + using EigenDAHasher for BlobHeader; + using EigenDAHasher for BatchMetadata; EigenDAServiceManager eigenDAServiceManager; EigenDAServiceManager eigenDAServiceManagerImplementation; @@ -56,7 +58,9 @@ contract MockRollupTest is BLSMockAVSDeployer { avsDirectory, rewardsCoordinator, registryCoordinator, - stakeRegistry + stakeRegistry, + IEigenDAThresholdRegistry(address(0)), + IEigenDARelayRegistry(address(0)) ); address[] memory confirmers = new address[](1); @@ -92,7 +96,7 @@ contract MockRollupTest is BLSMockAVSDeployer { function testChallenge(uint256 pseudoRandomNumber) public { //get commitment with illegal value - (IEigenDAServiceManager.BlobHeader memory blobHeader, EigenDARollupUtils.BlobVerificationProof memory blobVerificationProof) = _getCommitment(pseudoRandomNumber); + (BlobHeader memory blobHeader, BlobVerificationProof memory blobVerificationProof) = _getCommitment(pseudoRandomNumber); mockRollup.postCommitment(blobHeader, blobVerificationProof); @@ -107,14 +111,14 @@ contract MockRollupTest is BLSMockAVSDeployer { illegalCommitment = s0.scalar_mul(1).plus(s1.scalar_mul(1)).plus(s2.scalar_mul(1)).plus(s3.scalar_mul(1)).plus(s4.scalar_mul(1)); } - function _getCommitment(uint256 pseudoRandomNumber) internal returns (IEigenDAServiceManager.BlobHeader 
memory, EigenDARollupUtils.BlobVerificationProof memory){ + function _getCommitment(uint256 pseudoRandomNumber) internal returns (BlobHeader memory, BlobVerificationProof memory){ uint256 numQuorumBlobParams = 2; - IEigenDAServiceManager.BlobHeader[] memory blobHeader = new IEigenDAServiceManager.BlobHeader[](2); + BlobHeader[] memory blobHeader = new BlobHeader[](2); blobHeader[0] = _generateBlobHeader(pseudoRandomNumber, numQuorumBlobParams); uint256 anotherPseudoRandomNumber = uint256(keccak256(abi.encodePacked(pseudoRandomNumber))); blobHeader[1] = _generateBlobHeader(anotherPseudoRandomNumber, numQuorumBlobParams); - IEigenDAServiceManager.BatchHeader memory batchHeader; + BatchHeader memory batchHeader; bytes memory firstBlobHash = abi.encodePacked(blobHeader[0].hashBlobHeader()); bytes memory secondBlobHash = abi.encodePacked(blobHeader[1].hashBlobHeader()); batchHeader.blobHeadersRoot = keccak256(abi.encodePacked(keccak256(firstBlobHash), keccak256(secondBlobHash))); @@ -126,7 +130,7 @@ contract MockRollupTest is BLSMockAVSDeployer { batchHeader.referenceBlockNumber = uint32(block.number); // add dummy batch metadata - IEigenDAServiceManager.BatchMetadata memory batchMetadata; + BatchMetadata memory batchMetadata; batchMetadata.batchHeader = batchHeader; batchMetadata.signatoryRecordHash = keccak256(abi.encodePacked("signatoryRecordHash")); batchMetadata.confirmationBlockNumber = defaultConfirmationBlockNumber; @@ -137,7 +141,7 @@ contract MockRollupTest is BLSMockAVSDeployer { .with_key(defaultBatchId) .checked_write(batchMetadata.hashBatchMetadata()); - EigenDARollupUtils.BlobVerificationProof memory blobVerificationProof; + BlobVerificationProof memory blobVerificationProof; blobVerificationProof.batchId = defaultBatchId; blobVerificationProof.batchMetadata = batchMetadata; blobVerificationProof.inclusionProof = abi.encodePacked(keccak256(firstBlobHash)); @@ -150,17 +154,17 @@ contract MockRollupTest is BLSMockAVSDeployer { return (blobHeader[1], 
blobVerificationProof); } - function _generateBlobHeader(uint256 pseudoRandomNumber, uint256 numQuorumsBlobParams) internal returns (IEigenDAServiceManager.BlobHeader memory) { + function _generateBlobHeader(uint256 pseudoRandomNumber, uint256 numQuorumsBlobParams) internal returns (BlobHeader memory) { if(pseudoRandomNumber == 0) { pseudoRandomNumber = 1; } - IEigenDAServiceManager.BlobHeader memory blobHeader; + BlobHeader memory blobHeader; blobHeader.commitment = _getIllegalCommitment(); blobHeader.dataLength = uint32(uint256(keccak256(abi.encodePacked(pseudoRandomNumber, "blobHeader.dataLength")))); - blobHeader.quorumBlobParams = new IEigenDAServiceManager.QuorumBlobParam[](numQuorumsBlobParams); + blobHeader.quorumBlobParams = new QuorumBlobParam[](numQuorumsBlobParams); for (uint i = 0; i < numQuorumsBlobParams; i++) { if(i < 2){ blobHeader.quorumBlobParams[i].quorumNumber = uint8(i);