diff --git a/client/accounting_test.go b/client/accounting_test.go index 570d724a..8aad8d82 100644 --- a/client/accounting_test.go +++ b/client/accounting_test.go @@ -7,49 +7,40 @@ import ( "fmt" "math/rand" "testing" - "time" v2accounting "github.com/nspcc-dev/neofs-api-go/v2/accounting" protoaccounting "github.com/nspcc-dev/neofs-api-go/v2/accounting/grpc" - protosession "github.com/nspcc-dev/neofs-api-go/v2/session/grpc" protostatus "github.com/nspcc-dev/neofs-api-go/v2/status/grpc" accountingtest "github.com/nspcc-dev/neofs-sdk-go/accounting/test" apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" - neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" - neofscryptotest "github.com/nspcc-dev/neofs-sdk-go/crypto/test" "github.com/nspcc-dev/neofs-sdk-go/stat" "github.com/nspcc-dev/neofs-sdk-go/user" usertest "github.com/nspcc-dev/neofs-sdk-go/user/test" - "github.com/nspcc-dev/neofs-sdk-go/version" "github.com/stretchr/testify/require" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/types/known/timestamppb" ) -func newDefaultAccountingService(srv protoaccounting.AccountingServiceServer) testService { +func newDefaultAccountingService(t testing.TB, srv any) testService { + require.Implements(t, (*protoaccounting.AccountingServiceServer)(nil), srv) return testService{desc: &protoaccounting.AccountingService_ServiceDesc, impl: srv} } // returns Client of Accounting service provided by given server. -func newTestAccountingClient(t testing.TB, srv protoaccounting.AccountingServiceServer) *Client { - return newClient(t, newDefaultAccountingService(srv)) +func newTestAccountingClient(t testing.TB, srv any) *Client { + return newClient(t, newDefaultAccountingService(t, srv)) } type testGetBalanceServer struct { protoaccounting.UnimplementedAccountingServiceServer - - reqXHdrs []string - reqAcc []byte - - handlerErr error - - respSleep time.Duration - respUnsigned bool - respSigner neofscrypto.Signer - respMeta *protosession.ResponseMetaHeader - respBodyCons func() *protoaccounting.BalanceResponse_Body + testCommonServerSettings[ + *protoaccounting.BalanceRequest, + v2accounting.BalanceRequest, + *v2accounting.BalanceRequest, + protoaccounting.BalanceResponse_Body, + protoaccounting.BalanceResponse, + v2accounting.BalanceResponse, + *v2accounting.BalanceResponse, + ] + reqAcc []byte } // returns [protoaccounting.AccountingServiceServer] supporting Balance method @@ -57,45 +48,12 @@ type testGetBalanceServer struct { // responds with any valid message. Some methods allow to tune the behavior. func newTestGetBalanceServer() *testGetBalanceServer { return new(testGetBalanceServer) } -// makes the server to assert that any request has given X-headers. By -// default, no headers are expected. -func (x *testGetBalanceServer) checkRequestXHeaders(xhdrs []string) { - if len(xhdrs)%2 != 0 { - panic("odd number of elements") - } - x.reqXHdrs = xhdrs -} - // makes the server to assert that any request is for the given // account. By default, any account is accepted. func (x *testGetBalanceServer) checkRequestAccount(acc user.ID) { x.reqAcc = acc[:] } -// tells the server whether to sign all the responses or not. By default, any -// response is signed. -// -// Calling with false overrides signResponsesBy. -func (x *testGetBalanceServer) setEnabledResponseSigning(sign bool) { - x.respUnsigned = !sign -} - -// makes the server to always sign responses using given signer. By default, -// random signer is used. 
-// -// Has no effect with signing is disabled using setEnabledResponseSigning. -func (x *testGetBalanceServer) signResponsesBy(signer neofscrypto.Signer) { - x.respSigner = signer -} - -// makes the server to always respond with the specifically constructed body. By -// default, any valid body is returned. -// -// Conflicts with respondWithBalance. -func (x *testGetBalanceServer) respondWithBody(newBody func() *protoaccounting.BalanceResponse_Body) { - x.respBodyCons = newBody -} - // makes the server to always respond with the given balance. By default, any // valid balance is returned. // @@ -106,79 +64,13 @@ func (x *testGetBalanceServer) respondWithBalance(balance *protoaccounting.Decim }) } -// makes the server to always respond with the given meta header. By default, -// empty header is returned. -// -// Conflicts with respondWithStatus. -func (x *testGetBalanceServer) respondWithMeta(meta *protosession.ResponseMetaHeader) { - x.respMeta = meta -} - -// makes the server to always respond with the given status. By default, status -// OK is returned. -// -// Conflicts with respondWithMeta. -func (x *testGetBalanceServer) respondWithStatus(st *protostatus.Status) { - x.respondWithMeta(&protosession.ResponseMetaHeader{Status: st}) -} - -// makes the server to return given error from the handler. By default, some -// response message is returned. -func (x *testGetBalanceServer) setHandlerError(err error) { - x.handlerErr = err -} - -// makes the server to sleep specified time before any request processing. By -// default, and if dur is non-positive, request is handled instantly. -func (x *testGetBalanceServer) setSleepDuration(dur time.Duration) { - x.respSleep = dur -} - -func (x *testGetBalanceServer) verifyBalanceRequest(req *protoaccounting.BalanceRequest) error { - // signatures - var reqV2 v2accounting.BalanceRequest - if err := reqV2.FromGRPCMessage(req); err != nil { - panic(err) - } - if err := verifyServiceMessage(&reqV2); err != nil { - return newInvalidRequestVerificationHeaderErr(err) +func (x *testGetBalanceServer) verifyRequest(req *protoaccounting.BalanceRequest) error { + if err := x.testCommonServerSettings.verifyRequest(req); err != nil { + return err } // meta header - metaHdr := req.MetaHeader - curVersion := version.Current() - switch { - case metaHdr == nil: - return newInvalidRequestErr(errors.New("missing meta header")) - case metaHdr.Version == nil: - return newInvalidRequestMetaHeaderErr(errors.New("missing protocol version")) - case metaHdr.Version.Major != curVersion.Major() || metaHdr.Version.Minor != curVersion.Minor(): - return newInvalidRequestMetaHeaderErr(fmt.Errorf("wrong protocol version v%d.%d, expected %s", - metaHdr.Version.Major, metaHdr.Version.Minor, curVersion)) - case metaHdr.Epoch != 0: - return newInvalidRequestMetaHeaderErr(fmt.Errorf("non-zero epoch #%d", metaHdr.Epoch)) - case metaHdr.Ttl != 2: - return newInvalidRequestMetaHeaderErr(fmt.Errorf("wrong TTL %d, expected 2", metaHdr.Epoch)) - case metaHdr.SessionToken != nil: + if req.MetaHeader.SessionToken != nil { return newInvalidRequestMetaHeaderErr(errors.New("session token attached while should not be")) - case metaHdr.BearerToken != nil: - return newInvalidRequestMetaHeaderErr(errors.New("bearer token attached while should not be")) - case metaHdr.MagicNumber != 0: - return newInvalidRequestMetaHeaderErr(fmt.Errorf("non-zero network magic #%d", metaHdr.MagicNumber)) - case metaHdr.Origin != nil: - return newInvalidRequestMetaHeaderErr(errors.New("origin header is presented while 
should not be")) - case len(metaHdr.XHeaders) != len(x.reqXHdrs)/2: - return newInvalidRequestMetaHeaderErr(fmt.Errorf("number of x-headers %d differs parameterized %d", - len(metaHdr.XHeaders), len(x.reqXHdrs)/2)) - } - for i := range metaHdr.XHeaders { - if metaHdr.XHeaders[i].Key != x.reqXHdrs[2*i] { - return newInvalidRequestMetaHeaderErr(fmt.Errorf("x-header #%d key %q does not equal parameterized %q", - i, metaHdr.XHeaders[i].Key, x.reqXHdrs[2*i])) - } - if metaHdr.XHeaders[i].Value != x.reqXHdrs[2*i+1] { - return newInvalidRequestMetaHeaderErr(fmt.Errorf("x-header #%d value %q does not equal parameterized %q", - i, metaHdr.XHeaders[i].Value, x.reqXHdrs[2*i+1])) - } } // body body := req.Body @@ -195,16 +87,10 @@ func (x *testGetBalanceServer) verifyBalanceRequest(req *protoaccounting.Balance } func (x *testGetBalanceServer) Balance(_ context.Context, req *protoaccounting.BalanceRequest) (*protoaccounting.BalanceResponse, error) { - time.Sleep(x.respSleep) - - if err := x.verifyBalanceRequest(req); err != nil { + if err := x.verifyRequest(req); err != nil { return nil, err } - if x.handlerErr != nil { - return nil, x.handlerErr - } - resp := protoaccounting.BalanceResponse{ MetaHeader: x.respMeta, } @@ -219,23 +105,7 @@ func (x *testGetBalanceServer) Balance(_ context.Context, req *protoaccounting.B } } - if x.respUnsigned { - return &resp, nil - } - - var respV2 v2accounting.BalanceResponse - if err := respV2.FromGRPCMessage(&resp); err != nil { - panic(err) - } - signer := x.respSigner - if signer == nil { - signer = neofscryptotest.Signer() - } - if err := signServiceMessage(signer, &respV2, nil); err != nil { - return nil, fmt.Errorf("sign response message: %w", err) - } - - return respV2.ToGRPCMessage().(*protoaccounting.BalanceResponse), nil + return x.signResponse(&resp) } func TestClient_BalanceGet(t *testing.T) { @@ -366,58 +236,28 @@ func TestClient_BalanceGet(t *testing.T) { } }) t.Run("sign request failure", func(t *testing.T) { - c.prm.signer = neofscryptotest.FailSigner(neofscryptotest.Signer()) - _, err := c.BalanceGet(ctx, anyValidPrm) - require.ErrorContains(t, err, "sign request") + testSignRequestFailure(t, func(c *Client) error { + _, err := c.BalanceGet(ctx, anyValidPrm) + return err + }) }) t.Run("transport failure", func(t *testing.T) { - // note: errors returned from gRPC handlers are gRPC statuses, therefore, - // strictly speaking, they are not transport errors (like connection refusal for - // example). At the same time, according to the NeoFS protocol, all its statuses - // are transmitted in the message. So, returning an error from gRPC handler - // instead of a status field in the response is a protocol violation and can be - // equated to a transport error. 
- transportErr := errors.New("any transport failure") - srv := newTestGetBalanceServer() - srv.setHandlerError(transportErr) - c := newTestAccountingClient(t, srv) - - _, err := c.BalanceGet(ctx, anyValidPrm) - require.ErrorContains(t, err, "rpc failure") - require.ErrorContains(t, err, "write request") - st, ok := status.FromError(err) - require.True(t, ok) - require.Equal(t, codes.Unknown, st.Code()) - require.Contains(t, st.Message(), transportErr.Error()) + testTransportFailure(t, newTestGetBalanceServer, newTestAccountingClient, func(c *Client) error { + _, err := c.BalanceGet(ctx, anyValidPrm) + return err + }) }) - t.Run("response message decoding failure", func(t *testing.T) { - svc := testService{ - desc: &grpc.ServiceDesc{ServiceName: "neo.fs.v2.accounting.AccountingService", Methods: []grpc.MethodDesc{ - { - MethodName: "Balance", - Handler: func(srv any, ctx context.Context, dec func(any) error, interceptor grpc.UnaryServerInterceptor) (any, error) { - return timestamppb.Now(), nil // any completely different message - }, - }, - }}, - impl: nil, // disables interface assert - } - c := newClient(t, svc) - _, err := c.BalanceGet(ctx, anyValidPrm) - require.ErrorContains(t, err, "invalid response signature") - // TODO: Although the client will not accept such a response, current error - // does not make it clear what exactly the problem is. It is worth reacting to - // the incorrect structure if possible. + t.Run("response message type mismatch", func(t *testing.T) { + testUnaryRPCResponseTypeMismatch(t, "accounting.AccountingService", "Balance", func(c *Client) error { + _, err := c.BalanceGet(ctx, anyValidPrm) + return err + }) }) t.Run("invalid response verification header", func(t *testing.T) { - srv := newTestGetBalanceServer() - srv.setEnabledResponseSigning(false) - // TODO: add cases with less radical corruption such as replacing one byte or - // dropping only one of the signatures - c := newTestAccountingClient(t, srv) - - _, err := c.BalanceGet(ctx, anyValidPrm) - require.ErrorContains(t, err, "invalid response signature") + testInvalidResponseSignatures(t, newTestGetBalanceServer, newTestAccountingClient, func(c *Client) error { + _, err := c.BalanceGet(ctx, anyValidPrm) + return err + }) }) t.Run("invalid response body", func(t *testing.T) { for _, tc := range []struct { @@ -446,96 +286,21 @@ func TestClient_BalanceGet(t *testing.T) { } }) t.Run("response callback", func(t *testing.T) { - // NetmapService.LocalNodeInfo is called on dial, so it should also be - // initialized. The handler is called for it too. 
- nodeInfoSrvSigner := neofscryptotest.Signer() - nodeInfoSrvEpoch := rand.Uint64() - nodeInfoSrv := newTestGetNodeInfoServer() - nodeInfoSrv.respondWithMeta(&protosession.ResponseMetaHeader{Epoch: nodeInfoSrvEpoch}) - nodeInfoSrv.signResponsesBy(nodeInfoSrvSigner) - - balanceSrvSigner := neofscryptotest.Signer() - balanceSrvEpoch := nodeInfoSrvEpoch + 1 - balanceSrv := newTestGetBalanceServer() - balanceSrv.respondWithMeta(&protosession.ResponseMetaHeader{Epoch: balanceSrvEpoch}) - balanceSrv.signResponsesBy(balanceSrvSigner) - - var collected []ResponseMetaInfo - var cbErr error - c := newClientWithResponseCallback(t, func(meta ResponseMetaInfo) error { - collected = append(collected, meta) - return cbErr - }, - newDefaultNetmapServiceDesc(nodeInfoSrv), - newDefaultAccountingService(balanceSrv), - ) - - _, err := c.BalanceGet(ctx, anyValidPrm) - require.NoError(t, err) - require.Equal(t, []ResponseMetaInfo{ - {key: nodeInfoSrvSigner.PublicKeyBytes, epoch: nodeInfoSrvEpoch}, - {key: balanceSrvSigner.PublicKeyBytes, epoch: balanceSrvEpoch}, - }, collected) - - cbErr = errors.New("any response meta handler failure") - _, err = c.BalanceGet(ctx, anyValidPrm) - require.ErrorContains(t, err, "response callback error") - require.ErrorContains(t, err, err.Error()) - require.Len(t, collected, 3) - require.Equal(t, collected[2], collected[1]) + testResponseCallback(t, newTestGetBalanceServer, newDefaultAccountingService, func(c *Client) error { + _, err := c.BalanceGet(ctx, anyValidPrm) + return err + }) }) t.Run("exec statistics", func(t *testing.T) { - // NetmapService.LocalNodeInfo is called on dial, so it should also be - // initialized. Statistics are tracked for it too. - nodeEndpoint := "grpc://localhost:8082" // any valid - nodePub := []byte("any public key") - - nodeInfoSrv := newTestGetNodeInfoServer() - nodeInfoSrv.respondWithNodePublicKey(nodePub) - - balanceSrv := newTestGetBalanceServer() - - type statItem struct { - mtd stat.Method - dur time.Duration - err error - } - var lastItem *statItem - cb := func(pub []byte, endpoint string, mtd stat.Method, dur time.Duration, err error) { - if lastItem == nil { - require.Nil(t, pub) - } else { - require.Equal(t, nodePub, pub) - } - require.Equal(t, nodeEndpoint, endpoint) - require.Positive(t, dur) - lastItem = &statItem{mtd, dur, err} - } - - c := newCustomClient(t, nodeEndpoint, func(prm *PrmInit) { prm.SetStatisticCallback(cb) }, - newDefaultNetmapServiceDesc(nodeInfoSrv), - newDefaultAccountingService(balanceSrv), + testStatistic(t, newTestGetBalanceServer, newDefaultAccountingService, stat.MethodBalanceGet, + nil, + []testedClientOp{func(c *Client) error { + _, err := c.BalanceGet(ctx, PrmBalanceGet{}) + return err + }}, func(c *Client) error { + _, err := c.BalanceGet(ctx, anyValidPrm) + return err + }, ) - // dial - require.NotNil(t, lastItem) - require.Equal(t, stat.MethodEndpointInfo, lastItem.mtd) - require.Positive(t, lastItem.dur) - require.NoError(t, lastItem.err) - - // failure - _, callErr := c.BalanceGet(ctx, PrmBalanceGet{}) - require.Error(t, callErr) - require.Equal(t, stat.MethodBalanceGet, lastItem.mtd) - require.Positive(t, lastItem.dur) - require.Equal(t, callErr, lastItem.err) - - // OK - sleepDur := 100 * time.Millisecond - // duration is pretty short overall, but most likely larger than the exec time w/o sleep - balanceSrv.setSleepDuration(sleepDur) - _, _ = c.BalanceGet(ctx, anyValidPrm) - require.Equal(t, stat.MethodBalanceGet, lastItem.mtd) - require.Greater(t, lastItem.dur, sleepDur) - require.NoError(t, 
 lastItem.err) }) } diff --git a/client/client_test.go b/client/client_test.go index 03004157..f25fcf65 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -2,17 +2,27 @@ package client import ( "context" + "errors" "fmt" + "math/rand" "net" "testing" + "time" + apigrpc "github.com/nspcc-dev/neofs-api-go/v2/rpc/grpc" + protosession "github.com/nspcc-dev/neofs-api-go/v2/session/grpc" + protostatus "github.com/nspcc-dev/neofs-api-go/v2/status/grpc" apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" + neofscryptotest "github.com/nspcc-dev/neofs-sdk-go/crypto/test" + "github.com/nspcc-dev/neofs-sdk-go/stat" + "github.com/nspcc-dev/neofs-sdk-go/version" "github.com/stretchr/testify/require" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "google.golang.org/grpc/test/bufconn" + "google.golang.org/protobuf/types/known/timestamppb" ) /* @@ -210,3 +220,367 @@ type nopSigner struct{} func (nopSigner) Scheme() neofscrypto.Scheme { return neofscrypto.ECDSA_SHA512 } func (nopSigner) Sign([]byte) ([]byte, error) { return []byte("signature"), nil } func (x nopSigner) Public() neofscrypto.PublicKey { return nopPublicKey{} } + +// shares server code for various RPC servers. +type testCommonServerSettings[ + REQUEST interface { + GetMetaHeader() *protosession.RequestMetaHeader + }, + REQUESTV2 any, + REQUESTV2PTR interface { + *REQUESTV2 + FromGRPCMessage(apigrpc.Message) error + }, + RESPBODY any, + RESP any, + RESPV2 any, + RESPV2PTR interface { + *RESPV2 + ToGRPCMessage() apigrpc.Message + FromGRPCMessage(apigrpc.Message) error + }, +] struct { + handlerErr error + + reqXHdrs []string + + respSleep time.Duration + respUnsigned bool + respSigner neofscrypto.Signer + respMeta *protosession.ResponseMetaHeader + respBodyCons func() *RESPBODY +} + +// makes the server to return given error from the handler. By default, some +// response message is returned. +func (x *testCommonServerSettings[_, _, _, _, _, _, _]) setHandlerError(err error) { + x.handlerErr = err +} + +// makes the server to assert that any request has given X-headers. By +// default, no headers are expected. +func (x *testCommonServerSettings[_, _, _, _, _, _, _]) checkRequestXHeaders(xhdrs []string) { + if len(xhdrs)%2 != 0 { + panic("odd number of elements") + } + x.reqXHdrs = xhdrs +} + +// makes the server to sleep specified time before any request processing. By +// default, and if dur is non-positive, request is handled instantly. +func (x *testCommonServerSettings[_, _, _, _, _, _, _]) setSleepDuration(dur time.Duration) { + x.respSleep = dur +} + +// makes the server to respond with unsigned messages. By default, all +// responses are signed. +// +// Overrides signResponsesBy. +func (x *testCommonServerSettings[_, _, _, _, _, _, _]) respondWithoutSigning() { + x.respUnsigned = true +} + +// makes the server to always sign responses using given signer. By default, +// random signer is used. +// +// Has no effect when signing is disabled using respondWithoutSigning. +func (x *testCommonServerSettings[_, _, _, _, _, _, _]) signResponsesBy(signer neofscrypto.Signer) { + x.respSigner = signer +} + +// makes the server to always respond with the given meta header. By default, +// empty header is returned. +// +// Conflicts with respondWithStatus. 
+func (x *testCommonServerSettings[_, _, _, _, _, _, _]) respondWithMeta(meta *protosession.ResponseMetaHeader) { + x.respMeta = meta +} + +// makes the server to always respond with the given status. By default, status +// OK is returned. +// +// Conflicts with respondWithMeta. +func (x *testCommonServerSettings[_, _, _, _, _, _, _]) respondWithStatus(st *protostatus.Status) { + x.respondWithMeta(&protosession.ResponseMetaHeader{Status: st}) +} + +// makes the server to always respond with the specifically constructed body. By +// default, any valid body is returned. +func (x *testCommonServerSettings[_, _, _, RESPBODY, _, _, _]) respondWithBody(newBody func() *RESPBODY) { + x.respBodyCons = newBody +} + +func (x testCommonServerSettings[REQUEST, REQUESTV2, REQUESTV2PTR, _, _, _, _]) verifyRequest(req REQUEST) error { + time.Sleep(x.respSleep) + + // signatures + var reqV2 REQUESTV2 + if err := REQUESTV2PTR(&reqV2).FromGRPCMessage(req); err != nil { + panic(err) + } + if err := verifyServiceMessage(&reqV2); err != nil { + return newInvalidRequestVerificationHeaderErr(err) + } + // meta header + metaHdr := req.GetMetaHeader() + curVersion := version.Current() + switch { + case metaHdr == nil: + return newInvalidRequestErr(errors.New("missing meta header")) + case metaHdr.Version == nil: + return newInvalidRequestMetaHeaderErr(errors.New("missing protocol version")) + case metaHdr.Version.Major != curVersion.Major() || metaHdr.Version.Minor != curVersion.Minor(): + return newInvalidRequestMetaHeaderErr(fmt.Errorf("wrong protocol version v%d.%d, expected %s", + metaHdr.Version.Major, metaHdr.Version.Minor, curVersion)) + case metaHdr.Epoch != 0: + return newInvalidRequestMetaHeaderErr(fmt.Errorf("non-zero epoch #%d", metaHdr.Epoch)) + case metaHdr.Ttl != 2: + return newInvalidRequestMetaHeaderErr(fmt.Errorf("wrong TTL %d, expected 2", metaHdr.Ttl)) + case metaHdr.BearerToken != nil: + return newInvalidRequestMetaHeaderErr(errors.New("bearer token attached while should not be")) + case metaHdr.MagicNumber != 0: + return newInvalidRequestMetaHeaderErr(fmt.Errorf("non-zero network magic #%d", metaHdr.MagicNumber)) + case metaHdr.Origin != nil: + return newInvalidRequestMetaHeaderErr(errors.New("origin header is presented while should not be")) + case len(metaHdr.XHeaders) != len(x.reqXHdrs)/2: + return newInvalidRequestMetaHeaderErr(fmt.Errorf("number of x-headers %d differs parameterized %d", + len(metaHdr.XHeaders), len(x.reqXHdrs)/2)) + } + for i := range metaHdr.XHeaders { + if metaHdr.XHeaders[i].Key != x.reqXHdrs[2*i] { + return newInvalidRequestMetaHeaderErr(fmt.Errorf("x-header #%d key %q does not equal parameterized %q", + i, metaHdr.XHeaders[i].Key, x.reqXHdrs[2*i])) + } + if metaHdr.XHeaders[i].Value != x.reqXHdrs[2*i+1] { + return newInvalidRequestMetaHeaderErr(fmt.Errorf("x-header #%d value %q does not equal parameterized %q", + i, metaHdr.XHeaders[i].Value, x.reqXHdrs[2*i+1])) + } + } + return x.handlerErr +} + +func (x testCommonServerSettings[_, _, _, _, RESP, RESPV2, RESPV2PTR]) signResponse(resp *RESP) (*RESP, error) { + if x.respUnsigned { + return resp, nil + } + var r RESPV2 + respV2 := RESPV2PTR(&r) + if err := respV2.FromGRPCMessage(resp); err != nil { + panic(err) + } + signer := x.respSigner + if signer == nil { + signer = neofscryptotest.Signer() + } + if err := signServiceMessage(signer, respV2, nil); err != nil { + return nil, fmt.Errorf("sign response message: %w", err) + } + return respV2.ToGRPCMessage().(*RESP), nil +} + +type testedClientOp = func(*Client) 
error + +func assertSignRequestErr(t testing.TB, err error) { + require.ErrorContains(t, err, "sign request") +} + +func testSignRequestFailure(t testing.TB, op testedClientOp) { + c := newClient(t) + c.prm.signer = neofscryptotest.FailSigner(neofscryptotest.Signer()) + assertSignRequestErr(t, op(c)) +} + +func assertTransportErr(t testing.TB, transport, err error) { + require.ErrorContains(t, err, "rpc failure") + require.ErrorContains(t, err, "write request") + st, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, codes.Unknown, st.Code()) + require.Contains(t, st.Message(), transport.Error()) +} + +func testTransportFailure[SRV interface { + setHandlerError(error) +}](t testing.TB, newSrv func() SRV, bind func(testing.TB, any) *Client, op testedClientOp) { + transportErr := errors.New("any transport failure") + srv := newSrv() + srv.setHandlerError(transportErr) + c := bind(t, srv) + + err := op(c) + // note: errors returned from gRPC handlers are gRPC statuses, therefore, + // strictly speaking, they are not transport errors (like connection refusal for + // example). At the same time, according to the NeoFS protocol, all its statuses + // are transmitted in the message. So, returning an error from gRPC handler + // instead of a status field in the response is a protocol violation and can be + // equated to a transport error. + assertTransportErr(t, transportErr, err) +} + +func testInvalidResponseSignatures[SRV interface { + respondWithoutSigning() +}](t testing.TB, newSrv func() SRV, bind func(testing.TB, any) *Client, op testedClientOp) { + srv := newSrv() + srv.respondWithoutSigning() + // TODO: add cases with less radical corruption such as replacing one byte or + // dropping only one of the signatures + c := bind(t, srv) + require.ErrorContains(t, op(c), "invalid response signature") +} + +func testUnaryRPCResponseTypeMismatch(t testing.TB, svcName, method string, op testedClientOp) { + svc := testService{ + desc: &grpc.ServiceDesc{ServiceName: "neo.fs.v2." + svcName, Methods: []grpc.MethodDesc{ + { + MethodName: method, + Handler: func(srv any, ctx context.Context, dec func(any) error, interceptor grpc.UnaryServerInterceptor) (any, error) { + return timestamppb.Now(), nil // any completely different message + }, + }, + }}, + impl: nil, // disables interface assert + } + c := newClient(t, svc) + require.ErrorContains(t, op(c), "invalid response signature") + // TODO: Although the client will not accept such a response, current error + // does not make it clear what exactly the problem is. It is worth reacting to + // the incorrect structure if possible. +} + +func testResponseCallback[SRV interface { + respondWithMeta(*protosession.ResponseMetaHeader) + signResponsesBy(neofscrypto.Signer) +}](t testing.TB, newSrv func() SRV, newSvc func(testing.TB, any) testService, op testedClientOp) { + // NetmapService.LocalNodeInfo is called on dial, so it should also be + // initialized. The handler is called for it too. 
+ nodeInfoSrvSigner := neofscryptotest.Signer() + nodeInfoSrvEpoch := rand.Uint64() + nodeInfoSrv := newTestGetNodeInfoServer() + nodeInfoSrv.respondWithMeta(&protosession.ResponseMetaHeader{Epoch: nodeInfoSrvEpoch}) + nodeInfoSrv.signResponsesBy(nodeInfoSrvSigner) + + srvSigner := neofscryptotest.Signer() + srvEpoch := nodeInfoSrvEpoch + 1 + srv := newSrv() + srv.respondWithMeta(&protosession.ResponseMetaHeader{Epoch: srvEpoch}) + srv.signResponsesBy(srvSigner) + + var collected []ResponseMetaInfo + var cbErr error + c := newClientWithResponseCallback(t, func(meta ResponseMetaInfo) error { + collected = append(collected, meta) + return cbErr + }, + newDefaultNetmapServiceDesc(nodeInfoSrv), + newSvc(t, srv), + ) + + err := op(c) + require.NoError(t, err) + require.Equal(t, []ResponseMetaInfo{ + {key: nodeInfoSrvSigner.PublicKeyBytes, epoch: nodeInfoSrvEpoch}, + {key: srvSigner.PublicKeyBytes, epoch: srvEpoch}, + }, collected) + + cbErr = errors.New("any response meta handler failure") + err = op(c) + require.ErrorContains(t, err, "response callback error") + require.ErrorIs(t, err, cbErr) + require.Len(t, collected, 3) + require.Equal(t, collected[2], collected[1]) +} + +func testStatistic[SRV interface { + setSleepDuration(time.Duration) + setHandlerError(error) +}](t testing.TB, newSrv func() SRV, newSvc func(testing.TB, any) testService, mtd stat.Method, + customNonStatFailures []testedClientOp, customStatFailures []testedClientOp, validInputCall testedClientOp) { + // NetmapService.LocalNodeInfo is called on dial, so it should also be + // initialized. Statistics are tracked for it too. + nodeEndpoint := "grpc://localhost:8082" // any valid + nodePub := []byte("any public key") + + nodeInfoSrv := newTestGetNodeInfoServer() + nodeInfoSrv.respondWithNodePublicKey(nodePub) + + type statItem struct { + mtd stat.Method + dur time.Duration + err error + } + var lastItem *statItem + cb := func(pub []byte, endpoint string, mtd stat.Method, dur time.Duration, err error) { + if lastItem == nil { + require.Nil(t, pub) + } else { + require.Equal(t, nodePub, pub) + } + require.Equal(t, nodeEndpoint, endpoint) + require.Positive(t, dur) + lastItem = &statItem{mtd, dur, err} + } + + srv := newSrv() + c := newCustomClient(t, nodeEndpoint, func(prm *PrmInit) { prm.SetStatisticCallback(cb) }, + newDefaultNetmapServiceDesc(nodeInfoSrv), + newSvc(t, srv), + ) + // dial + require.NotNil(t, lastItem) + require.Equal(t, stat.MethodEndpointInfo, lastItem.mtd) + require.Positive(t, lastItem.dur) + require.NoError(t, lastItem.err) + + // custom non-stat failures + for _, getNonStatErr := range customNonStatFailures { + err := getNonStatErr(c) + require.Error(t, err) + require.Equal(t, mtd, lastItem.mtd) + require.Positive(t, lastItem.dur) + // TODO: strange that for some errors statistics are similar to OK + require.NoError(t, lastItem.err) + } + + // custom stat failures + for _, getStatErr := range customStatFailures { + err := getStatErr(c) + require.Error(t, err) + require.Equal(t, mtd, lastItem.mtd) + require.Positive(t, lastItem.dur) + require.Equal(t, err, lastItem.err) + } + + // sign request failure + signerCp := c.prm.signer + c.prm.signer = neofscryptotest.FailSigner(c.prm.signer) + + err := validInputCall(c) + assertSignRequestErr(t, err) + require.Equal(t, mtd, lastItem.mtd) + require.Positive(t, lastItem.dur) + require.Equal(t, err, lastItem.err) + + c.prm.signer = signerCp + + // transport + transportErr := errors.New("any transport failure") + srv.setHandlerError(transportErr) + + err = 
validInputCall(c) + assertTransportErr(t, transportErr, err) + require.Equal(t, mtd, lastItem.mtd) + require.Positive(t, lastItem.dur) + require.Equal(t, err, lastItem.err) + + srv.setHandlerError(nil) + + // OK + sleepDur := 100 * time.Millisecond + // duration is pretty short overall, but most likely larger than the exec time w/o sleep + srv.setSleepDuration(sleepDur) + err = validInputCall(c) + require.NoError(t, err) + require.Equal(t, mtd, lastItem.mtd) + require.Greater(t, lastItem.dur, sleepDur) + require.NoError(t, lastItem.err) +} diff --git a/client/container_statistic_test.go b/client/container_statistic_test.go index 2412d494..8bd38f76 100644 --- a/client/container_statistic_test.go +++ b/client/container_statistic_test.go @@ -12,9 +12,7 @@ import ( "github.com/google/uuid" "github.com/nspcc-dev/neofs-sdk-go/container" "github.com/nspcc-dev/neofs-sdk-go/container/acl" - cid "github.com/nspcc-dev/neofs-sdk-go/container/id" cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" - "github.com/nspcc-dev/neofs-sdk-go/eacl" "github.com/nspcc-dev/neofs-sdk-go/netmap" "github.com/nspcc-dev/neofs-sdk-go/object" oid "github.com/nspcc-dev/neofs-sdk-go/object/id" @@ -91,107 +89,6 @@ func prepareContainer(accountID user.ID) container.Container { return cont } -func testEaclTable(containerID cid.ID) eacl.Table { - var table eacl.Table - table.SetCID(containerID) - - r := eacl.ConstructRecord(eacl.ActionAllow, eacl.OperationPut, []eacl.Target{eacl.NewTargetByRole(eacl.RoleOthers)}) - table.AddRecord(&r) - - return table -} - -func TestClientStatistic_ContainerPut(t *testing.T) { - usr := usertest.User() - ctx := context.Background() - var srv testPutContainerServer - c := newTestContainerClient(t, &srv) - cont := prepareContainer(usr.ID) - - collector := newCollector() - c.prm.statisticCallback = collector.Collect - - var prm PrmContainerPut - _, err := c.ContainerPut(ctx, cont, usr.RFC6979, prm) - require.NoError(t, err) - - require.Equal(t, 1, collector.methods[stat.MethodContainerPut].requests) -} - -func TestClientStatistic_ContainerGet(t *testing.T) { - ctx := context.Background() - var srv testGetContainerServer - c := newTestContainerClient(t, &srv) - collector := newCollector() - c.prm.statisticCallback = collector.Collect - - var prm PrmContainerGet - _, err := c.ContainerGet(ctx, cid.ID{}, prm) - require.NoError(t, err) - - require.Equal(t, 1, collector.methods[stat.MethodContainerGet].requests) -} - -func TestClientStatistic_ContainerList(t *testing.T) { - usr := usertest.User() - ctx := context.Background() - var srv testListContainersServer - c := newTestContainerClient(t, &srv) - collector := newCollector() - c.prm.statisticCallback = collector.Collect - - var prm PrmContainerList - _, err := c.ContainerList(ctx, usr.ID, prm) - require.NoError(t, err) - - require.Equal(t, 1, collector.methods[stat.MethodContainerList].requests) -} - -func TestClientStatistic_ContainerDelete(t *testing.T) { - usr := usertest.User() - ctx := context.Background() - var srv testDeleteContainerServer - c := newTestContainerClient(t, &srv) - collector := newCollector() - c.prm.statisticCallback = collector.Collect - - var prm PrmContainerDelete - err := c.ContainerDelete(ctx, cid.ID{}, usr, prm) - require.NoError(t, err) - - require.Equal(t, 1, collector.methods[stat.MethodContainerDelete].requests) -} - -func TestClientStatistic_ContainerEacl(t *testing.T) { - ctx := context.Background() - var srv testGetEACLServer - c := newTestContainerClient(t, &srv) - collector := newCollector() - 
c.prm.statisticCallback = collector.Collect - - var prm PrmContainerEACL - _, err := c.ContainerEACL(ctx, cid.ID{}, prm) - require.NoError(t, err) - - require.Equal(t, 1, collector.methods[stat.MethodContainerEACL].requests) -} - -func TestClientStatistic_ContainerSetEacl(t *testing.T) { - usr := usertest.User() - ctx := context.Background() - var srv testSetEACLServer - c := newTestContainerClient(t, &srv) - collector := newCollector() - c.prm.statisticCallback = collector.Collect - - var prm PrmContainerSetEACL - table := testEaclTable(cidtest.ID()) - err := c.ContainerSetEACL(ctx, table, usr, prm) - require.NoError(t, err) - - require.Equal(t, 1, collector.methods[stat.MethodContainerSetEACL].requests) -} - func TestClientStatistic_ContainerAnnounceUsedSpace(t *testing.T) { ctx := context.Background() var srv testAnnounceContainerSpaceServer diff --git a/client/container_test.go b/client/container_test.go index 0392ea24..6c2ccdfc 100644 --- a/client/container_test.go +++ b/client/container_test.go @@ -1,204 +1,2609 @@ package client import ( + "bytes" "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/sha256" + "errors" "fmt" + "math" + "math/big" + "math/rand" "testing" + v2acl "github.com/nspcc-dev/neofs-api-go/v2/acl" protoacl "github.com/nspcc-dev/neofs-api-go/v2/acl/grpc" apicontainer "github.com/nspcc-dev/neofs-api-go/v2/container" protocontainer "github.com/nspcc-dev/neofs-api-go/v2/container/grpc" + protonetmap "github.com/nspcc-dev/neofs-api-go/v2/netmap/grpc" protorefs "github.com/nspcc-dev/neofs-api-go/v2/refs/grpc" + apisession "github.com/nspcc-dev/neofs-api-go/v2/session" + protosession "github.com/nspcc-dev/neofs-api-go/v2/session/grpc" + protostatus "github.com/nspcc-dev/neofs-api-go/v2/status/grpc" + apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" "github.com/nspcc-dev/neofs-sdk-go/container" cid "github.com/nspcc-dev/neofs-sdk-go/container/id" cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" containertest "github.com/nspcc-dev/neofs-sdk-go/container/test" + neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" + neofsecdsa "github.com/nspcc-dev/neofs-sdk-go/crypto/ecdsa" neofscryptotest "github.com/nspcc-dev/neofs-sdk-go/crypto/test" "github.com/nspcc-dev/neofs-sdk-go/eacl" + "github.com/nspcc-dev/neofs-sdk-go/netmap" + "github.com/nspcc-dev/neofs-sdk-go/session" + sessiontest "github.com/nspcc-dev/neofs-sdk-go/session/test" + "github.com/nspcc-dev/neofs-sdk-go/stat" + "github.com/nspcc-dev/neofs-sdk-go/user" + usertest "github.com/nspcc-dev/neofs-sdk-go/user/test" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" ) +var anyValidMinProtoContainer = &protocontainer.Container{ + Version: new(protorefs.Version), + OwnerId: &protorefs.OwnerID{Value: []byte{53, 233, 31, 174, 37, 64, 241, 22, 182, 130, 7, 210, 222, 150, 85, 18, 106, 4, + 253, 122, 191, 90, 168, 187, 245}}, + Nonce: []byte{207, 5, 57, 28, 224, 103, 76, 207, 133, 186, 108, 96, 185, 52, 37, 205}, + PlacementPolicy: &protonetmap.PlacementPolicy{ + Replicas: make([]*protonetmap.Replica, 1), + }, +} + +var anyValidFullProtoContainer = &protocontainer.Container{ + Version: &protorefs.Version{Major: 538919038, Minor: 3957317479}, + OwnerId: proto.Clone(anyValidMinProtoContainer.OwnerId).(*protorefs.OwnerID), + Nonce: bytes.Clone(anyValidMinProtoContainer.Nonce), + BasicAcl: 1043832770, + Attributes: []*protocontainer.Container_Attribute{ + {Key: "k1", Value: "v1"}, + {Key: "k2", Value: "v2"}, + {Key: "Name", Value: "any 
container name"}, + {Key: "Timestamp", Value: "1732577694"}, + {Key: "__NEOFS__NAME", Value: "any domain name"}, + {Key: "__NEOFS__ZONE", Value: "any domain zone"}, + {Key: "__NEOFS__DISABLE_HOMOMORPHIC_HASHING", Value: "true"}, + }, + PlacementPolicy: &protonetmap.PlacementPolicy{ + Replicas: []*protonetmap.Replica{ + {Count: 3060437, Selector: "selector1"}, + {Count: 156936495, Selector: "selector2"}, + }, + ContainerBackupFactor: 920231904, + Selectors: []*protonetmap.Selector{ + {Name: "selector1", Count: 1663184999, Clause: 1, Attribute: "attribute1", Filter: "filter1"}, + {Name: "selector2", Count: 2649065896, Clause: 2, Attribute: "attribute2", Filter: "filter2"}, + {Name: "selector_max", Count: 2649065896, Clause: math.MaxInt32, Attribute: "attribute_max", Filter: "filter_max"}, + }, + Filters: []*protonetmap.Filter{ + {Name: "filter1", Key: "key1", Op: 0, Value: "value1", Filters: []*protonetmap.Filter{ + {}, + {}, + }}, + {Op: 1}, + {Op: 2}, + {Op: 3}, + {Op: 4}, + {Op: 5}, + {Op: 6}, + {Op: 7}, + {Op: 8}, + {Op: math.MaxInt32}, + }, + SubnetId: &protorefs.SubnetID{Value: 987533317}, + }, +} + +var anyValidProtoContainerIDs = []*protorefs.ContainerID{ + {Value: []byte{198, 137, 143, 192, 231, 50, 106, 89, 225, 118, 7, 42, 40, 225, 197, 183, 9, 205, 71, 140, 233, 30, 63, 73, 224, 244, 235, 18, 205, 45, 155, 236}}, + {Value: []byte{26, 71, 99, 242, 146, 121, 0, 142, 95, 50, 78, 190, 222, 104, 252, 72, 48, 219, 67, 226, 30, 90, 103, 51, 1, 234, 136, 143, 200, 240, 75, 250}}, + {Value: []byte{51, 124, 45, 83, 227, 119, 66, 76, 220, 196, 118, 197, 116, 44, 138, 83, 103, 102, 134, 191, 108, 124, 162, 255, 184, 137, 193, 242, 178, 10, 23, 29}}, +} + +var anyValidMinEACL = &protoacl.EACLTable{} + +var anyValidFullEACL = &protoacl.EACLTable{ + Version: &protorefs.Version{Major: 538919038, Minor: 3957317479}, + ContainerId: proto.Clone(anyValidProtoContainerIDs[0]).(*protorefs.ContainerID), + Records: []*protoacl.EACLRecord{ + {}, + {Operation: 1, Action: 1}, + {Operation: 2, Action: 2}, + {Operation: 3, Action: 3}, + {Operation: 4, Action: math.MaxInt32}, + {Operation: 5}, + {Operation: 6}, + {Operation: 7}, + {Operation: math.MaxInt32}, + {Filters: []*protoacl.EACLRecord_Filter{ + {HeaderType: 0, MatchType: 0, Key: "key1", Value: "val1"}, + {HeaderType: 1, MatchType: 1}, + {HeaderType: 2, MatchType: 2}, + {HeaderType: 3, MatchType: 3}, + {HeaderType: math.MaxInt32, MatchType: 4}, + {MatchType: 5}, + {MatchType: 6}, + {MatchType: 7}, + {MatchType: math.MaxInt32}, + }}, + {Targets: []*protoacl.EACLRecord_Target{ + {Role: 0, Keys: [][]byte{[]byte("key1"), []byte("key2")}}, + {Role: 1}, + {Role: 2}, + {Role: 3}, + {Role: math.MaxInt32}, + }}, + }, +} + +func newDefaultContainerService(t testing.TB, srv any) testService { + require.Implements(t, (*protocontainer.ContainerServiceServer)(nil), srv) + return testService{desc: &protocontainer.ContainerService_ServiceDesc, impl: srv} +} + // returns Client of Container service provided by given server. -func newTestContainerClient(t testing.TB, srv protocontainer.ContainerServiceServer) *Client { - return newClient(t, testService{desc: &protocontainer.ContainerService_ServiceDesc, impl: srv}) +func newTestContainerClient(t testing.TB, srv any) *Client { + return newClient(t, newDefaultContainerService(t, srv)) +} + +// for sharing between servers of requests with container session token. 
+type testContainerSessionServerSettings struct { + expectedToken *session.Container +} + +// makes the server to assert that any request carries given session token. By +// default, session token must not be attached. +func (x *testContainerSessionServerSettings) checkRequestSessionToken(st session.Container) { + x.expectedToken = &st +} + +func (x *testContainerSessionServerSettings) verifySessionToken(m *protosession.SessionToken) error { + if m == nil { + if x.expectedToken != nil { + return newInvalidRequestMetaHeaderErr(errors.New("session token is missing while should not be")) + } + return nil + } + if x.expectedToken == nil { + return newInvalidRequestMetaHeaderErr(errors.New("session token attached while should not be")) + } + var stV2 apisession.Token + if err := stV2.FromGRPCMessage(m); err != nil { + panic(err) + } + var st session.Container + if err := st.ReadFromV2(stV2); err != nil { + return newInvalidRequestMetaHeaderErr(fmt.Errorf("invalid session token: %w", err)) + } + if !assert.ObjectsAreEqual(st, *x.expectedToken) { + return newInvalidRequestMetaHeaderErr(errors.New("session token differs the parameterized one")) + } + return nil +} + +// for sharing between servers of requests with RFC 6979 signature of particular +// data. +type testRFC6979DataSignatureServerSettings struct { + reqPub *ecdsa.PublicKey + reqDataSignature *neofscrypto.Signature +} + +// makes the server to assert that any request carries signature of the +// particular data calculated using given private key. By default, any key can +// be used. +// +// Has no effect with checkRequestDataSignature. +func (x *testRFC6979DataSignatureServerSettings) checkRequestDataSignerKey(pk ecdsa.PrivateKey) { + x.reqPub = &pk.PublicKey +} + +// makes the server to assert that any request carries given signature without +// verification. By default, any signature matching the data is accepted. +// +// Overrides checkRequestDataSignerKey. 
+func (x *testRFC6979DataSignatureServerSettings) checkRequestDataSignature(s neofscrypto.Signature) { + x.reqDataSignature = &s +} + +func (x *testRFC6979DataSignatureServerSettings) verifyDataSignature(signedField string, data []byte, m *protorefs.SignatureRFC6979) error { + field := signedField + " signature" + if m == nil { + return newErrMissingRequestBodyField(field) + } + if x.reqDataSignature != nil { + if exp := x.reqDataSignature.PublicKeyBytes(); !bytes.Equal(m.Key, exp) { + return newErrInvalidRequestField(field, fmt.Errorf("public key %x != parameterized %x", m.Key, exp)) + } + if exp := x.reqDataSignature.Value(); !bytes.Equal(m.Sign, exp) { + return newErrInvalidRequestField(field, fmt.Errorf("value %x != parameterized %x", m.Sign, exp)) + } + return nil + } + + reqPubX, reqPubY := elliptic.UnmarshalCompressed(elliptic.P256(), m.Key) + if reqPubX == nil { + return newErrInvalidRequestField(field, fmt.Errorf("invalid EC point binary %x", m.Key)) + } + if x.reqPub != nil && (reqPubX.Cmp(x.reqPub.X) != 0 || reqPubY.Cmp(x.reqPub.Y) != 0) { + return newErrInvalidRequestField(field, fmt.Errorf("EC point != the parameterized one")) + } + sig := m.Sign + if len(sig) != 64 { + return newErrInvalidRequestField(field, fmt.Errorf("invalid signature length %d", len(sig))) + } + h := sha256.Sum256(data) + if !ecdsa.Verify(&ecdsa.PublicKey{Curve: elliptic.P256(), X: reqPubX, Y: reqPubY}, h[:], + new(big.Int).SetBytes(sig[0:32]), new(big.Int).SetBytes(sig[32:])) { + return newErrInvalidRequestField(field, fmt.Errorf("signature mismatches the %s", signedField)) + } + return nil +} + +// for sharing between servers of requests with required container ID. +type testRequestContainerIDServerSettings struct { + expectedReqCnrID []byte +} + +// makes the server to assert that any request carries given container ID. By +// default, any ID is accepted. +func (x *testRequestContainerIDServerSettings) checkRequestContainerID(id cid.ID) { + x.expectedReqCnrID = id[:] +} + +func (x *testRequestContainerIDServerSettings) verifyRequestContainerID(m *protorefs.ContainerID) error { + if m == nil { + return newErrMissingRequestBodyField("container ID") + } + if x.expectedReqCnrID != nil && !bytes.Equal(m.Value, x.expectedReqCnrID) { + return newErrInvalidRequestField("container ID", fmt.Errorf("container ID %x != the parameterized %x", + m.Value, x.expectedReqCnrID)) + } + return nil } type testPutContainerServer struct { protocontainer.UnimplementedContainerServiceServer + testCommonServerSettings[ + *protocontainer.PutRequest, + apicontainer.PutRequest, + *apicontainer.PutRequest, + protocontainer.PutResponse_Body, + protocontainer.PutResponse, + apicontainer.PutResponse, + *apicontainer.PutResponse, + ] + testContainerSessionServerSettings + testRFC6979DataSignatureServerSettings + reqContainer *container.Container } -func (x *testPutContainerServer) Put(context.Context, *protocontainer.PutRequest) (*protocontainer.PutResponse, error) { - id := cidtest.ID() - resp := protocontainer.PutResponse{ - Body: &protocontainer.PutResponse_Body{ - ContainerId: &protorefs.ContainerID{Value: id[:]}, - }, - } +// returns [protocontainer.ContainerServiceServer] supporting Put method only. +// Default implementation performs common verification of any request, and +// responds with any valid message. Some methods allow to tune the behavior. +func newTestPutContainerServer() *testPutContainerServer { return new(testPutContainerServer) } + +// makes the server to assert that any request carries given container. 
By +// default, any valid container is accepted. +func (x *testPutContainerServer) checkRequestContainer(cnr container.Container) { + x.reqContainer = &cnr +} + +// makes the server to always respond with the given ID. By default, any +// valid ID is returned. +// +// Conflicts with respondWithBody. +func (x *testPutContainerServer) respondWithID(id []byte) { + x.respondWithBody(func() *protocontainer.PutResponse_Body { + return &protocontainer.PutResponse_Body{ContainerId: &protorefs.ContainerID{Value: id}} + }) +} - var respV2 apicontainer.PutResponse - if err := respV2.FromGRPCMessage(&resp); err != nil { +func (x *testPutContainerServer) verifyRequest(req *protocontainer.PutRequest) error { + if err := x.testCommonServerSettings.verifyRequest(req); err != nil { + return err + } + // session token + if err := x.verifySessionToken(req.MetaHeader.SessionToken); err != nil { + return err + } + // body + body := req.Body + if body == nil { + return newInvalidRequestBodyErr(errors.New("missing body")) + } + // container + if body.Container == nil { + return newErrMissingRequestBodyField("container") + } + var cnrV2 apicontainer.Container + if err := cnrV2.FromGRPCMessage(body.Container); err != nil { panic(err) } - if err := signServiceMessage(neofscryptotest.Signer(), &respV2, nil); err != nil { - return nil, fmt.Errorf("sign response message: %w", err) + var cnr container.Container + if err := cnr.ReadFromV2(cnrV2); err != nil { + return newErrInvalidRequestField("container", fmt.Errorf("invalid container: %w", err)) + } + if x.reqContainer != nil && !assert.ObjectsAreEqual(cnr, *x.reqContainer) { + return newErrInvalidRequestField("container", errors.New("container differs the parameterized one")) + } + // signature + return x.verifyDataSignature("container", cnr.Marshal(), body.Signature) +} + +func (x *testPutContainerServer) Put(_ context.Context, req *protocontainer.PutRequest) (*protocontainer.PutResponse, error) { + if err := x.verifyRequest(req); err != nil { + return nil, err } - return respV2.ToGRPCMessage().(*protocontainer.PutResponse), nil + resp := protocontainer.PutResponse{ + MetaHeader: x.respMeta, + } + if x.respBodyCons != nil { + resp.Body = x.respBodyCons() + } else { + id := cidtest.ID() + resp.Body = &protocontainer.PutResponse_Body{ + ContainerId: &protorefs.ContainerID{Value: id[:]}, + } + } + + return x.signResponse(&resp) } type testGetContainerServer struct { protocontainer.UnimplementedContainerServiceServer + testCommonServerSettings[ + *protocontainer.GetRequest, + apicontainer.GetRequest, + *apicontainer.GetRequest, + protocontainer.GetResponse_Body, + protocontainer.GetResponse, + apicontainer.GetResponse, + *apicontainer.GetResponse, + ] + testRequestContainerIDServerSettings } -func (x *testGetContainerServer) Get(context.Context, *protocontainer.GetRequest) (*protocontainer.GetResponse, error) { - cnr := containertest.Container() - var cnrV2 apicontainer.Container - cnr.WriteToV2(&cnrV2) - resp := protocontainer.GetResponse{ - Body: &protocontainer.GetResponse_Body{ - Container: cnrV2.ToGRPCMessage().(*protocontainer.Container), - }, +// returns [protocontainer.ContainerServiceServer] supporting Get method only. +// Default implementation performs common verification of any request, and +// responds with any valid message. Some methods allow to tune the behavior. +func newTestGetContainerServer() *testGetContainerServer { return new(testGetContainerServer) } + +// makes the server to always respond with the given container. 
By default, any +// valid container is returned. +// +// Conflicts with respondWithBody. +func (x *testGetContainerServer) respondWithContainer(cnr *protocontainer.Container) { + x.respondWithBody(func() *protocontainer.GetResponse_Body { + return &protocontainer.GetResponse_Body{Container: cnr} + }) +} + +func (x *testGetContainerServer) verifyRequest(req *protocontainer.GetRequest) error { + if err := x.testCommonServerSettings.verifyRequest(req); err != nil { + return err } + // body + body := req.Body + if body == nil { + return newInvalidRequestBodyErr(errors.New("missing body")) + } + return x.verifyRequestContainerID(body.ContainerId) +} - var respV2 apicontainer.GetResponse - if err := respV2.FromGRPCMessage(&resp); err != nil { - panic(err) +func (x *testGetContainerServer) Get(_ context.Context, req *protocontainer.GetRequest) (*protocontainer.GetResponse, error) { + if err := x.verifyRequest(req); err != nil { + return nil, err } - if err := signServiceMessage(neofscryptotest.Signer(), &respV2, nil); err != nil { - return nil, fmt.Errorf("sign response message: %w", err) + + resp := protocontainer.GetResponse{ + MetaHeader: x.respMeta, + } + if x.respBodyCons != nil { + resp.Body = x.respBodyCons() + } else { + resp.Body = &protocontainer.GetResponse_Body{ + Container: proto.Clone(anyValidFullProtoContainer).(*protocontainer.Container), + Signature: &protorefs.SignatureRFC6979{Key: []byte("any_key"), Sign: []byte("any_signature")}, + SessionToken: &protosession.SessionToken{ + Body: &protosession.SessionToken_Body{ + Id: []byte("any_ID"), + OwnerId: &protorefs.OwnerID{Value: []byte("any_user")}, + Lifetime: &protosession.SessionToken_Body_TokenLifetime{Exp: 1, Nbf: 2, Iat: 3}, + SessionKey: []byte("any_session_key"), + }, + Signature: &protorefs.Signature{ + Key: []byte("any_key"), + Sign: []byte("any_signature"), + Scheme: protorefs.SignatureScheme(rand.Int31()), + }, + }, + } } - return respV2.ToGRPCMessage().(*protocontainer.GetResponse), nil + return x.signResponse(&resp) } type testListContainersServer struct { protocontainer.UnimplementedContainerServiceServer + testCommonServerSettings[ + *protocontainer.ListRequest, + apicontainer.ListRequest, + *apicontainer.ListRequest, + protocontainer.ListResponse_Body, + protocontainer.ListResponse, + apicontainer.ListResponse, + *apicontainer.ListResponse, + ] + reqOwner []byte } -func (x *testListContainersServer) List(context.Context, *protocontainer.ListRequest) (*protocontainer.ListResponse, error) { - var resp protocontainer.ListResponse +// returns [protocontainer.ContainerServiceServer] supporting List method only. +// Default implementation performs common verification of any request, and +// responds with any valid message. Some methods allow to tune the behavior. +func newTestListContainersServer() *testListContainersServer { return new(testListContainersServer) } - var respV2 apicontainer.ListResponse - if err := respV2.FromGRPCMessage(&resp); err != nil { - panic(err) +// makes the server to assert that any request carries given owner. By default, +// any user is accepted. +func (x *testListContainersServer) checkOwner(owner user.ID) { x.reqOwner = owner[:] } + +// makes the server to always respond with the given IDs. By default, several +// valid IDs are returned. +// +// Conflicts with respondWithBody. 
+func (x *testListContainersServer) respondWithIDs(ids []*protorefs.ContainerID) { + x.respondWithBody(func() *protocontainer.ListResponse_Body { + return &protocontainer.ListResponse_Body{ContainerIds: ids} + }) +} + +func (x *testListContainersServer) verifyRequest(req *protocontainer.ListRequest) error { + if err := x.testCommonServerSettings.verifyRequest(req); err != nil { + return err + } + // body + body := req.Body + if body == nil { + return newInvalidRequestBodyErr(errors.New("missing body")) + } + // owner + if body.OwnerId == nil { + return newErrMissingRequestBodyField("owner") + } + if x.reqOwner != nil && !bytes.Equal(body.OwnerId.Value, x.reqOwner) { + return newErrInvalidRequestField("owner", fmt.Errorf("owner %x != the parameterized %x", + body.OwnerId.Value, x.reqOwner)) + } + return nil +} + +func (x *testListContainersServer) List(_ context.Context, req *protocontainer.ListRequest) (*protocontainer.ListResponse, error) { + if err := x.verifyRequest(req); err != nil { + return nil, err + } + + resp := protocontainer.ListResponse{ + MetaHeader: x.respMeta, } - if err := signServiceMessage(neofscryptotest.Signer(), &respV2, nil); err != nil { - return nil, fmt.Errorf("sign response message: %w", err) + if x.respBodyCons != nil { + resp.Body = x.respBodyCons() + } else { + ids := make([]*protorefs.ContainerID, len(anyValidProtoContainerIDs)) + for i := range anyValidProtoContainerIDs { + ids[i] = proto.Clone(anyValidProtoContainerIDs[i]).(*protorefs.ContainerID) + } + resp.Body = &protocontainer.ListResponse_Body{ContainerIds: ids} } - return respV2.ToGRPCMessage().(*protocontainer.ListResponse), nil + return x.signResponse(&resp) } type testDeleteContainerServer struct { protocontainer.UnimplementedContainerServiceServer + testCommonServerSettings[ + *protocontainer.DeleteRequest, + apicontainer.DeleteRequest, + *apicontainer.DeleteRequest, + protocontainer.DeleteResponse_Body, + protocontainer.DeleteResponse, + apicontainer.DeleteResponse, + *apicontainer.DeleteResponse, + ] + testContainerSessionServerSettings + testRequestContainerIDServerSettings + testRFC6979DataSignatureServerSettings } -func (x *testDeleteContainerServer) Delete(context.Context, *protocontainer.DeleteRequest) (*protocontainer.DeleteResponse, error) { - var resp protocontainer.DeleteResponse +// returns [protocontainer.ContainerServiceServer] supporting Delete method only. +// Default implementation performs common verification of any request, and +// responds with any valid message. Some methods allow to tune the behavior. 
+func newTestDeleteContainerServer() *testDeleteContainerServer { return new(testDeleteContainerServer) } - var respV2 apicontainer.DeleteResponse - if err := respV2.FromGRPCMessage(&resp); err != nil { - panic(err) +func (x *testDeleteContainerServer) verifyRequest(req *protocontainer.DeleteRequest) error { + if err := x.testCommonServerSettings.verifyRequest(req); err != nil { + return err + } + // session token + if err := x.verifySessionToken(req.MetaHeader.SessionToken); err != nil { + return err } - if err := signServiceMessage(neofscryptotest.Signer(), &respV2, nil); err != nil { - return nil, fmt.Errorf("sign response message: %w", err) + // body + body := req.Body + if body == nil { + return newInvalidRequestBodyErr(errors.New("missing body")) } + // ID + if err := x.verifyRequestContainerID(body.ContainerId); err != nil { + return err + } + // signature + return x.verifyDataSignature("container ID", body.ContainerId.Value, body.Signature) +} - return respV2.ToGRPCMessage().(*protocontainer.DeleteResponse), nil +func (x *testDeleteContainerServer) Delete(_ context.Context, req *protocontainer.DeleteRequest) (*protocontainer.DeleteResponse, error) { + if err := x.verifyRequest(req); err != nil { + return nil, err + } + + resp := protocontainer.DeleteResponse{ + MetaHeader: x.respMeta, + } + + return x.signResponse(&resp) } type testGetEACLServer struct { protocontainer.UnimplementedContainerServiceServer + testCommonServerSettings[ + *protocontainer.GetExtendedACLRequest, + apicontainer.GetExtendedACLRequest, + *apicontainer.GetExtendedACLRequest, + protocontainer.GetExtendedACLResponse_Body, + protocontainer.GetExtendedACLResponse, + apicontainer.GetExtendedACLResponse, + *apicontainer.GetExtendedACLResponse, + ] + testRequestContainerIDServerSettings } -func (x *testGetEACLServer) GetExtendedACL(context.Context, *protocontainer.GetExtendedACLRequest) (*protocontainer.GetExtendedACLResponse, error) { - resp := protocontainer.GetExtendedACLResponse{ - Body: &protocontainer.GetExtendedACLResponse_Body{ - Eacl: new(protoacl.EACLTable), - }, +// returns [protocontainer.ContainerServiceServer] supporting GetExtendedACL +// method only. Default implementation performs common verification of any +// request, and responds with any valid message. Some methods allow to tune the +// behavior. +func newTestGetEACLServer() *testGetEACLServer { return new(testGetEACLServer) } + +// makes the server to always respond with the given eACL. By default, any +// valid eACL is returned. +// +// Conflicts with respondWithBody. 
+func (x *testGetEACLServer) respondWithEACL(eACL *protoacl.EACLTable) { + x.respondWithBody(func() *protocontainer.GetExtendedACLResponse_Body { + return &protocontainer.GetExtendedACLResponse_Body{Eacl: eACL} + }) +} + +func (x *testGetEACLServer) verifyRequest(req *protocontainer.GetExtendedACLRequest) error { + if err := x.testCommonServerSettings.verifyRequest(req); err != nil { + return err + } + // body + body := req.Body + if body == nil { + return newInvalidRequestBodyErr(errors.New("missing body")) } + // + return x.verifyRequestContainerID(body.ContainerId) +} - var respV2 apicontainer.GetExtendedACLResponse - if err := respV2.FromGRPCMessage(&resp); err != nil { - panic(err) +func (x *testGetEACLServer) GetExtendedACL(_ context.Context, req *protocontainer.GetExtendedACLRequest) (*protocontainer.GetExtendedACLResponse, error) { + if err := x.verifyRequest(req); err != nil { + return nil, err + } + + resp := protocontainer.GetExtendedACLResponse{ + MetaHeader: x.respMeta, } - if err := signServiceMessage(neofscryptotest.Signer(), &respV2, nil); err != nil { - return nil, fmt.Errorf("sign response message: %w", err) + if x.respBodyCons != nil { + resp.Body = x.respBodyCons() + } else { + resp.Body = &protocontainer.GetExtendedACLResponse_Body{ + Eacl: proto.Clone(anyValidFullEACL).(*protoacl.EACLTable), + Signature: &protorefs.SignatureRFC6979{Key: []byte("any_key"), Sign: []byte("any_signature")}, + SessionToken: &protosession.SessionToken{ + Body: &protosession.SessionToken_Body{ + Id: []byte("any_ID"), + OwnerId: &protorefs.OwnerID{Value: []byte("any_user")}, + Lifetime: &protosession.SessionToken_Body_TokenLifetime{Exp: 1, Nbf: 2, Iat: 3}, + SessionKey: []byte("any_session_key"), + }, + Signature: &protorefs.Signature{ + Key: []byte("any_key"), + Sign: []byte("any_signature"), + Scheme: protorefs.SignatureScheme(rand.Int31()), + }, + }, + } } - return respV2.ToGRPCMessage().(*protocontainer.GetExtendedACLResponse), nil + return x.signResponse(&resp) } type testSetEACLServer struct { protocontainer.UnimplementedContainerServiceServer + testCommonServerSettings[ + *protocontainer.SetExtendedACLRequest, + apicontainer.SetExtendedACLRequest, + *apicontainer.SetExtendedACLRequest, + protocontainer.SetExtendedACLResponse_Body, + protocontainer.SetExtendedACLResponse, + apicontainer.SetExtendedACLResponse, + *apicontainer.SetExtendedACLResponse, + ] + testContainerSessionServerSettings + testRFC6979DataSignatureServerSettings + reqEACL *eacl.Table } -func (x *testSetEACLServer) SetExtendedACL(context.Context, *protocontainer.SetExtendedACLRequest) (*protocontainer.SetExtendedACLResponse, error) { - var resp protocontainer.SetExtendedACLResponse +// returns [protocontainer.ContainerServiceServer] supporting SetExtendedACL +// method only. Default implementation performs common verification of any +// request, and responds with any valid message. Some methods allow to tune the +// behavior. +func newTestSetEACLServer() *testSetEACLServer { return new(testSetEACLServer) } + +// makes the server to assert that any request carries given eACL. By +// default, any valid eACL is accepted. 
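+//
+// Illustrative sketch mirroring TestClient_ContainerSetEACL below (eACL and
+// signer are prepared by the test):
+//
+//	srv := newTestSetEACLServer()
+//	srv.checkRequestEACL(eACL)
+//	srv.checkRequestDataSignerKey(signer.ECDSAPrivateKey)
+//	c := newTestContainerClient(t, srv)
+//	err := c.ContainerSetEACL(ctx, eACL, signer.RFC6979, PrmContainerSetEACL{})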
+func (x *testSetEACLServer) checkRequestEACL(eACL eacl.Table) { x.reqEACL = &eACL } - var respV2 apicontainer.SetExtendedACLResponse - if err := respV2.FromGRPCMessage(&resp); err != nil { +func (x *testSetEACLServer) verifyRequest(req *protocontainer.SetExtendedACLRequest) error { + if err := x.testCommonServerSettings.verifyRequest(req); err != nil { + return err + } + // session token + if err := x.verifySessionToken(req.MetaHeader.SessionToken); err != nil { + return err + } + // body + body := req.Body + if body == nil { + return newInvalidRequestBodyErr(errors.New("missing body")) + } + // eACL + if body.Eacl == nil { + return newErrMissingRequestBodyField("eACL") + } + var eACLV2 v2acl.Table + if err := eACLV2.FromGRPCMessage(body.Eacl); err != nil { panic(err) } - if err := signServiceMessage(neofscryptotest.Signer(), &respV2, nil); err != nil { - return nil, fmt.Errorf("sign response message: %w", err) + var eACL eacl.Table + if err := eACL.ReadFromV2(eACLV2); err != nil { + return newErrInvalidRequestField("eACL", fmt.Errorf("invalid container: %w", err)) } + if x.reqEACL != nil && !bytes.Equal(eACL.Marshal(), x.reqEACL.Marshal()) { + return newErrInvalidRequestField("eACL", errors.New("eACL differs the parameterized one")) + } + // signature + return x.verifyDataSignature("eACL", eACL.Marshal(), body.Signature) +} - return respV2.ToGRPCMessage().(*protocontainer.SetExtendedACLResponse), nil +func (x *testSetEACLServer) SetExtendedACL(_ context.Context, req *protocontainer.SetExtendedACLRequest) (*protocontainer.SetExtendedACLResponse, error) { + if err := x.verifyRequest(req); err != nil { + return nil, err + } + + resp := protocontainer.SetExtendedACLResponse{ + MetaHeader: x.respMeta, + } + + return x.signResponse(&resp) } type testAnnounceContainerSpaceServer struct { protocontainer.UnimplementedContainerServiceServer + testCommonServerSettings[ + *protocontainer.AnnounceUsedSpaceRequest, + apicontainer.AnnounceUsedSpaceRequest, + *apicontainer.AnnounceUsedSpaceRequest, + protocontainer.AnnounceUsedSpaceRequest, + protocontainer.AnnounceUsedSpaceResponse, + apicontainer.AnnounceUsedSpaceResponse, + *apicontainer.AnnounceUsedSpaceResponse, + ] + reqAnnouncements []container.SizeEstimation +} + +// returns [protocontainer.ContainerServiceServer] supporting AnnounceUsedSpace +// method only. Default implementation performs common verification of any +// request, and responds with any valid message. Some methods allow to tune the +// behavior. +func newTestAnnounceContainerSpaceServer() *testAnnounceContainerSpaceServer { + return new(testAnnounceContainerSpaceServer) } -func (x *testAnnounceContainerSpaceServer) AnnounceUsedSpace(context.Context, *protocontainer.AnnounceUsedSpaceRequest) (*protocontainer.AnnounceUsedSpaceResponse, error) { - var resp protocontainer.AnnounceUsedSpaceResponse +// makes the server to assert that any request carries given announcements. By +// default, any valid values are accepted. 
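+//
+// Illustrative sketch (containertest.SizeEstimation is assumed as a source of
+// a valid estimation; any pre-built []container.SizeEstimation works):
+//
+//	srv := newTestAnnounceContainerSpaceServer()
+//	srv.checkRequestAnnouncements([]container.SizeEstimation{containertest.SizeEstimation()})
+//	c := newTestContainerClient(t, srv)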
+func (x *testAnnounceContainerSpaceServer) checkRequestAnnouncements(els []container.SizeEstimation) { + x.reqAnnouncements = els +} - var respV2 apicontainer.AnnounceUsedSpaceResponse - if err := respV2.FromGRPCMessage(&resp); err != nil { - panic(err) +func (x *testAnnounceContainerSpaceServer) verifyRequest(req *protocontainer.AnnounceUsedSpaceRequest) error { + if err := x.testCommonServerSettings.verifyRequest(req); err != nil { + return err + } + // body + body := req.Body + if body == nil { + return newInvalidRequestBodyErr(errors.New("missing body")) + } + // announcements + if len(body.Announcements) == 0 { + return newErrMissingRequestBodyField("announcements") } - if err := signServiceMessage(neofscryptotest.Signer(), &respV2, nil); err != nil { - return nil, fmt.Errorf("sign response message: %w", err) + es := make([]container.SizeEstimation, len(body.Announcements)) + for i := range body.Announcements { + var esV2 apicontainer.UsedSpaceAnnouncement + if err := esV2.FromGRPCMessage(body.Announcements[i]); err != nil { + panic(err) + } + if err := es[i].ReadFromV2(esV2); err != nil { + return newErrInvalidRequestField("announcements", fmt.Errorf("invalid element #%d: %w", i, err)) + } + } + if x.reqAnnouncements != nil && !assert.ObjectsAreEqual(x.reqAnnouncements, es) { + return newErrInvalidRequestField("announcements", errors.New("elements differ the parameterized ones")) + } + return nil +} + +func (x *testAnnounceContainerSpaceServer) AnnounceUsedSpace(_ context.Context, req *protocontainer.AnnounceUsedSpaceRequest) (*protocontainer.AnnounceUsedSpaceResponse, error) { + if err := x.verifyRequest(req); err != nil { + return nil, err } - return respV2.ToGRPCMessage().(*protocontainer.AnnounceUsedSpaceResponse), nil + resp := protocontainer.AnnounceUsedSpaceResponse{ + MetaHeader: x.respMeta, + } + + return x.signResponse(&resp) } -func TestClient_Container(t *testing.T) { +func TestClient_ContainerPut(t *testing.T) { c := newClient(t) ctx := context.Background() + var anyValidOpts PrmContainerPut + anyValidContainer := containertest.Container() + anyValidSigner := neofscryptotest.Signer().RFC6979 + anyID := cidtest.ID() + + t.Run("invalid user input", func(t *testing.T) { + t.Run("missing signer", func(t *testing.T) { + _, err := c.ContainerPut(ctx, anyValidContainer, nil, anyValidOpts) + require.ErrorIs(t, err, ErrMissingSigner) + }) + }) + t.Run("sign container failure", func(t *testing.T) { + t.Run("wrong scheme", func(t *testing.T) { + _, err := c.ContainerPut(ctx, anyValidContainer, neofsecdsa.Signer(neofscryptotest.ECDSAPrivateKey()), anyValidOpts) + require.EqualError(t, err, "calculate container signature: incorrect signer: expected ECDSA_DETERMINISTIC_SHA256 scheme") + }) + t.Run("signer failure", func(t *testing.T) { + _, err := c.ContainerPut(ctx, anyValidContainer, neofscryptotest.FailSigner(neofscryptotest.Signer()), anyValidOpts) + require.ErrorContains(t, err, "calculate container signature") + }) + }) + t.Run("exact in-out", func(t *testing.T) { + /* + This test is dedicated for cases when user input results in sending a certain + request to the server and receiving a specific response to it. For user input + errors, transport, client internals, etc. see/add other tests. 
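+
+			Each subtest below wires a dedicated newTestPutContainerServer into a
+			test client via newTestContainerClient, tunes it with the server's
+			checkRequestXxx and respondWithXxx helpers, and asserts the exact
+			ContainerPut outcome.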
+ */ + t.Run("default", func(t *testing.T) { + signer := neofscryptotest.Signer() + srv := newTestPutContainerServer() + srv.checkRequestContainer(anyValidContainer) + srv.checkRequestDataSignerKey(signer.ECDSAPrivateKey) + srv.respondWithID(anyID[:]) + c := newTestContainerClient(t, srv) + + id, err := c.ContainerPut(ctx, anyValidContainer, signer.RFC6979, PrmContainerPut{}) + require.NoError(t, err) + require.Equal(t, anyID, id) + }) + t.Run("options", func(t *testing.T) { + t.Run("precalculated container signature", func(t *testing.T) { + var sig neofscrypto.Signature + sig.SetPublicKeyBytes([]byte("any public key")) + sig.SetValue([]byte("any value")) + srv := newTestPutContainerServer() + srv.checkRequestDataSignature(sig) + c := newTestContainerClient(t, srv) + + opts := anyValidOpts + opts.AttachSignature(sig) + _, err := c.ContainerPut(ctx, anyValidContainer, anyValidSigner, opts) + require.NoError(t, err) + }) + t.Run("X-headers", func(t *testing.T) { + xhdrs := []string{ + "x-key1", "x-val1", + "x-key2", "x-val2", + } + srv := newTestPutContainerServer() + srv.checkRequestXHeaders(xhdrs) + c := newTestContainerClient(t, srv) + + opts := anyValidOpts + opts.WithXHeaders(xhdrs...) + _, err := c.ContainerPut(ctx, anyValidContainer, anyValidSigner, opts) + require.NoError(t, err) + }) + t.Run("session token", func(t *testing.T) { + st := sessiontest.ContainerSigned(usertest.User()) + srv := newTestPutContainerServer() + srv.checkRequestSessionToken(st) + c := newTestContainerClient(t, srv) + + opts := anyValidOpts + opts.WithinSession(st) + _, err := c.ContainerPut(ctx, anyValidContainer, anyValidSigner, opts) + require.NoError(t, err) + }) + }) + t.Run("statuses", func(t *testing.T) { + srv := newTestPutContainerServer() + c := newTestContainerClient(t, srv) + + type customStatusTestcase struct { + msg string + detail *protostatus.Status_Detail + assert func(testing.TB, error) + } + for _, tc := range []struct { + code uint32 + err error + constErr error + custom []customStatusTestcase + }{ + // TODO: use const codes after transition to current module's proto lib + {code: 1024, err: new(apistatus.ServerInternal), constErr: apistatus.ErrServerInternal, custom: []customStatusTestcase{ + {msg: "some server failure", assert: func(t testing.TB, err error) { + var e *apistatus.ServerInternal + require.ErrorAs(t, err, &e) + require.Equal(t, "some server failure", e.Message()) + }}, + }}, + {code: 1025, err: new(apistatus.WrongMagicNumber), constErr: apistatus.ErrWrongMagicNumber, custom: []customStatusTestcase{ + {assert: func(t testing.TB, err error) { + var e *apistatus.WrongMagicNumber + require.ErrorAs(t, err, &e) + _, ok := e.CorrectMagic() + require.Zero(t, ok) + }}, + { + detail: &protostatus.Status_Detail{Id: 0, Value: []byte{140, 15, 162, 245, 219, 236, 37, 191}}, + assert: func(t testing.TB, err error) { + var e *apistatus.WrongMagicNumber + require.ErrorAs(t, err, &e) + magic, ok := e.CorrectMagic() + require.EqualValues(t, 1, ok) + require.EqualValues(t, uint64(10092464466800944575), magic) + }, + }, + { + detail: &protostatus.Status_Detail{Id: 0, Value: []byte{1, 2, 3}}, + assert: func(t testing.TB, err error) { + var e *apistatus.WrongMagicNumber + require.ErrorAs(t, err, &e) + _, ok := e.CorrectMagic() + require.EqualValues(t, -1, ok) + }, + }, + }}, + {code: 1026, err: new(apistatus.SignatureVerification), constErr: apistatus.ErrSignatureVerification, custom: []customStatusTestcase{ + {msg: "invalid request signature", assert: func(t testing.TB, err error) { + var e 
*apistatus.SignatureVerification + require.ErrorAs(t, err, &e) + require.Equal(t, "invalid request signature", e.Message()) + }}, + }}, + {code: 1027, err: new(apistatus.NodeUnderMaintenance), constErr: apistatus.ErrNodeUnderMaintenance, custom: []customStatusTestcase{ + {msg: "node is under maintenance", assert: func(t testing.TB, err error) { + var e *apistatus.NodeUnderMaintenance + require.ErrorAs(t, err, &e) + require.Equal(t, "node is under maintenance", e.Message()) + }}, + }}, + } { + st := &protostatus.Status{Code: tc.code} + srv.respondWithStatus(st) + + res, err := c.ContainerPut(ctx, anyValidContainer, anyValidSigner, anyValidOpts) + require.Zero(t, res) + require.ErrorAs(t, err, &tc.err) + require.ErrorIs(t, err, tc.constErr) + + for _, tcCustom := range tc.custom { + st.Message = tcCustom.msg + if tcCustom.detail != nil { + st.Details = []*protostatus.Status_Detail{tcCustom.detail} + } + srv.respondWithStatus(st) - t.Run("missing signer", func(t *testing.T) { - tt := []struct { - name string - methodCall func() error + _, err := c.ContainerPut(ctx, anyValidContainer, anyValidSigner, anyValidOpts) + require.ErrorAs(t, err, &tc.err) + tcCustom.assert(t, tc.err) + } + } + }) + }) + t.Run("sign request failure", func(t *testing.T) { + testSignRequestFailure(t, func(c *Client) error { + _, err := c.ContainerPut(ctx, anyValidContainer, anyValidSigner, anyValidOpts) + return err + }) + }) + t.Run("transport failure", func(t *testing.T) { + testTransportFailure(t, newTestPutContainerServer, newTestContainerClient, func(c *Client) error { + _, err := c.ContainerPut(ctx, anyValidContainer, anyValidSigner, anyValidOpts) + return err + }) + }) + t.Run("response message decoding failure", func(t *testing.T) { + testUnaryRPCResponseTypeMismatch(t, "container.ContainerService", "Put", func(c *Client) error { + _, err := c.ContainerPut(ctx, anyValidContainer, anyValidSigner, anyValidOpts) + return err + }) + }) + t.Run("invalid response verification header", func(t *testing.T) { + testInvalidResponseSignatures(t, newTestPutContainerServer, newTestContainerClient, func(c *Client) error { + _, err := c.ContainerPut(ctx, anyValidContainer, anyValidSigner, anyValidOpts) + return err + }) + }) + t.Run("invalid response body", func(t *testing.T) { + for _, tc := range []struct { + name string + body *protocontainer.PutResponse_Body + assertErr func(testing.TB, error) }{ - { - "put", - func() error { - _, err := c.ContainerPut(ctx, container.Container{}, nil, PrmContainerPut{}) - return err - }, - }, - { - "delete", - func() error { - return c.ContainerDelete(ctx, cid.ID{}, nil, PrmContainerDelete{}) - }, + {name: "missing", body: nil, assertErr: func(t testing.TB, err error) { + require.ErrorIs(t, err, MissingResponseFieldErr{}) + require.EqualError(t, err, "missing container ID field in the response") + // TODO: worth clarifying that body is completely missing? 
+ }}, + {name: "empty", body: new(protocontainer.PutResponse_Body), assertErr: func(t testing.TB, err error) { + require.ErrorIs(t, err, MissingResponseFieldErr{}) + require.EqualError(t, err, "missing container ID field in the response") + }}, + {name: "ID/empty", body: &protocontainer.PutResponse_Body{ContainerId: new(protorefs.ContainerID)}, assertErr: func(t testing.TB, err error) { + require.EqualError(t, err, "invalid container ID field in the response: invalid length 0") + }}, + {name: "ID/undersize", body: &protocontainer.PutResponse_Body{ + ContainerId: &protorefs.ContainerID{Value: anyID[:31]}, + }, assertErr: func(t testing.TB, err error) { + require.EqualError(t, err, "invalid container ID field in the response: invalid length 31") + }}, + {name: "ID/oversize", body: &protocontainer.PutResponse_Body{ + ContainerId: &protorefs.ContainerID{Value: append(anyID[:], 1)}, + }, assertErr: func(t testing.TB, err error) { + require.EqualError(t, err, "invalid container ID field in the response: invalid length 33") + }}, + {name: "ID/zero", body: &protocontainer.PutResponse_Body{ + ContainerId: &protorefs.ContainerID{Value: make([]byte, 32)}, + }, assertErr: func(t testing.TB, err error) { + require.ErrorIs(t, err, cid.ErrZero) + require.ErrorContains(t, err, "invalid container ID field in the response") + }}, + } { + t.Run(tc.name, func(t *testing.T) { + srv := newTestPutContainerServer() + srv.respondWithBody(func() *protocontainer.PutResponse_Body { return tc.body }) + c := newTestContainerClient(t, srv) + + _, err := c.ContainerPut(ctx, anyValidContainer, anyValidSigner, anyValidOpts) + tc.assertErr(t, err) + }) + } + }) + t.Run("response callback", func(t *testing.T) { + testResponseCallback(t, newTestPutContainerServer, newDefaultContainerService, func(c *Client) error { + _, err := c.ContainerPut(ctx, anyValidContainer, anyValidSigner, anyValidOpts) + return err + }) + }) + t.Run("exec statistics", func(t *testing.T) { + testStatistic(t, newTestPutContainerServer, newDefaultContainerService, stat.MethodContainerPut, + []testedClientOp{func(c *Client) error { + _, err := c.ContainerPut(ctx, anyValidContainer, nil, anyValidOpts) + return err + }}, + []testedClientOp{func(c *Client) error { + _, err := c.ContainerPut(ctx, anyValidContainer, neofscryptotest.FailSigner(anyValidSigner), anyValidOpts) + return err + }}, func(c *Client) error { + _, err := c.ContainerPut(ctx, anyValidContainer, anyValidSigner, anyValidOpts) + return err }, - { - "set_eacl", - func() error { - return c.ContainerSetEACL(ctx, eacl.Table{}, nil, PrmContainerSetEACL{}) - }, + ) + }) +} + +func TestClient_ContainerGet(t *testing.T) { + ctx := context.Background() + var anyValidOpts PrmContainerGet + anyID := cidtest.ID() + + t.Run("exact in-out", func(t *testing.T) { + /* + This test is dedicated for cases when user input results in sending a certain + request to the server and receiving a specific response to it. For user input + errors, transport, client internals, etc. see/add other tests. 
+ */ + t.Run("default", func(t *testing.T) { + t.Run("full", func(t *testing.T) { + srv := newTestGetContainerServer() + srv.checkRequestContainerID(anyID) + srv.respondWithContainer(proto.Clone(anyValidFullProtoContainer).(*protocontainer.Container)) + c := newTestContainerClient(t, srv) + + cnr, err := c.ContainerGet(ctx, anyID, PrmContainerGet{}) + require.NoError(t, err) + require.EqualValues(t, anyValidFullProtoContainer.Version.Major, cnr.Version().Major()) + require.EqualValues(t, anyValidFullProtoContainer.Version.Minor, cnr.Version().Minor()) + require.EqualValues(t, anyValidFullProtoContainer.OwnerId.Value, cnr.Owner()) + require.EqualValues(t, anyValidFullProtoContainer.BasicAcl, cnr.BasicACL().Bits()) + require.Equal(t, anyValidFullProtoContainer.Attributes[0].Value, cnr.Attribute(anyValidFullProtoContainer.Attributes[0].Key)) + require.Equal(t, anyValidFullProtoContainer.Attributes[1].Value, cnr.Attribute(anyValidFullProtoContainer.Attributes[1].Key)) + require.Equal(t, "any container name", cnr.Name()) + require.EqualValues(t, 1732577694, cnr.CreatedAt().Unix()) + require.Equal(t, "any domain name", cnr.ReadDomain().Name()) + require.Equal(t, "any domain zone", cnr.ReadDomain().Zone()) + require.True(t, cnr.IsHomomorphicHashingDisabled()) + policy := cnr.PlacementPolicy() + require.EqualValues(t, anyValidFullProtoContainer.PlacementPolicy.ContainerBackupFactor, policy.ContainerBackupFactor()) + rs := policy.Replicas() + require.Len(t, rs, len(anyValidFullProtoContainer.PlacementPolicy.Replicas)) + for i := range rs { + m := anyValidFullProtoContainer.PlacementPolicy.Replicas[i] + require.EqualValues(t, m.Count, rs[i].NumberOfObjects()) + require.Equal(t, m.Selector, rs[i].SelectorName()) + } + ss := policy.Selectors() + require.Len(t, ss, len(anyValidFullProtoContainer.PlacementPolicy.Selectors)) + for i := range ss { + m := anyValidFullProtoContainer.PlacementPolicy.Selectors[i] + require.Equal(t, m.Name, ss[i].Name()) + require.EqualValues(t, m.Count, ss[i].NumberOfNodes()) + switch m.Clause { + default: + require.False(t, ss[i].IsSame()) + require.False(t, ss[i].IsDistinct()) + case protonetmap.Clause_SAME: + require.True(t, ss[i].IsSame()) + require.False(t, ss[i].IsDistinct()) + case protonetmap.Clause_DISTINCT: + require.False(t, ss[i].IsSame()) + require.True(t, ss[i].IsDistinct()) + } + require.Equal(t, m.Attribute, ss[i].BucketAttribute()) + require.Equal(t, m.Filter, ss[i].FilterName()) + } + fs := policy.Filters() + require.Len(t, fs, len(anyValidFullProtoContainer.PlacementPolicy.Filters)) + for i, f := range fs { + m := anyValidFullProtoContainer.PlacementPolicy.Filters[i] + require.Equal(t, m.Name, f.Name()) + require.Equal(t, m.Key, f.Key()) + require.EqualValues(t, m.Value, f.Value()) + if i == 0 { + subs := fs[i].SubFilters() + require.Len(t, subs, len(m.Filters)) + for j, sub := range subs { + m := m.Filters[j] + require.Equal(t, m.Name, sub.Name()) + require.Equal(t, m.Key, sub.Key()) + require.EqualValues(t, m.Op, sub.Op()) + require.EqualValues(t, m.Value, sub.Value()) + require.Empty(t, sub.SubFilters()) + } + } else { + require.Empty(t, fs[i].SubFilters()) + } + } + for i, op := range []netmap.FilterOp{ + 0, + netmap.FilterOpEQ, + netmap.FilterOpNE, + netmap.FilterOpGT, + netmap.FilterOpGE, + netmap.FilterOpLT, + netmap.FilterOpLE, + netmap.FilterOpOR, + netmap.FilterOpAND, + math.MaxInt32, + } { + require.Equal(t, op, fs[i].Op()) + } + }) + t.Run("min", func(t *testing.T) { + srv := newTestGetContainerServer() + m := 
proto.Clone(anyValidMinProtoContainer).(*protocontainer.Container) + srv.respondWithContainer(m) + c := newTestContainerClient(t, srv) + + cnr, err := c.ContainerGet(ctx, anyID, PrmContainerGet{}) + require.NoError(t, err) + require.Zero(t, cnr.Version()) + require.EqualValues(t, anyValidMinProtoContainer.OwnerId.Value, cnr.Owner()) + require.Zero(t, cnr.BasicACL().Bits()) + cnr.IterateAttributes(func(key, val string) { t.Fatalf("unexpected attribute %q", key) }) + policy := cnr.PlacementPolicy() + require.Len(t, policy.Replicas(), 1) + require.Zero(t, policy.Replicas()[0]) + require.Zero(t, policy.ContainerBackupFactor()) + require.Empty(t, policy.Selectors()) + require.Empty(t, policy.Filters()) + }) + }) + t.Run("options", func(t *testing.T) { + t.Run("X-headers", func(t *testing.T) { + xhdrs := []string{ + "x-key1", "x-val1", + "x-key2", "x-val2", + } + srv := newTestGetContainerServer() + srv.checkRequestXHeaders(xhdrs) + c := newTestContainerClient(t, srv) + + opts := anyValidOpts + opts.WithXHeaders(xhdrs...) + _, err := c.ContainerGet(ctx, anyID, opts) + require.NoError(t, err) + }) + }) + t.Run("statuses", func(t *testing.T) { + srv := newTestGetContainerServer() + c := newTestContainerClient(t, srv) + + type customStatusTestcase struct { + msg string + detail *protostatus.Status_Detail + assert func(testing.TB, error) + } + for _, tc := range []struct { + code uint32 + err error + constErr error + custom []customStatusTestcase + }{ + // TODO: use const codes after transition to current module's proto lib + {code: 1024, err: new(apistatus.ServerInternal), constErr: apistatus.ErrServerInternal, custom: []customStatusTestcase{ + {msg: "some server failure", assert: func(t testing.TB, err error) { + var e *apistatus.ServerInternal + require.ErrorAs(t, err, &e) + require.Equal(t, "some server failure", e.Message()) + }}, + }}, + {code: 1025, err: new(apistatus.WrongMagicNumber), constErr: apistatus.ErrWrongMagicNumber, custom: []customStatusTestcase{ + {assert: func(t testing.TB, err error) { + var e *apistatus.WrongMagicNumber + require.ErrorAs(t, err, &e) + _, ok := e.CorrectMagic() + require.Zero(t, ok) + }}, + { + detail: &protostatus.Status_Detail{Id: 0, Value: []byte{140, 15, 162, 245, 219, 236, 37, 191}}, + assert: func(t testing.TB, err error) { + var e *apistatus.WrongMagicNumber + require.ErrorAs(t, err, &e) + magic, ok := e.CorrectMagic() + require.EqualValues(t, 1, ok) + require.EqualValues(t, uint64(10092464466800944575), magic) + }, + }, + { + detail: &protostatus.Status_Detail{Id: 0, Value: []byte{1, 2, 3}}, + assert: func(t testing.TB, err error) { + var e *apistatus.WrongMagicNumber + require.ErrorAs(t, err, &e) + _, ok := e.CorrectMagic() + require.EqualValues(t, -1, ok) + }, + }, + }}, + {code: 1026, err: new(apistatus.SignatureVerification), constErr: apistatus.ErrSignatureVerification, custom: []customStatusTestcase{ + {msg: "invalid request signature", assert: func(t testing.TB, err error) { + var e *apistatus.SignatureVerification + require.ErrorAs(t, err, &e) + require.Equal(t, "invalid request signature", e.Message()) + }}, + }}, + {code: 1027, err: new(apistatus.NodeUnderMaintenance), constErr: apistatus.ErrNodeUnderMaintenance, custom: []customStatusTestcase{ + {msg: "node is under maintenance", assert: func(t testing.TB, err error) { + var e *apistatus.NodeUnderMaintenance + require.ErrorAs(t, err, &e) + require.Equal(t, "node is under maintenance", e.Message()) + }}, + }}, + {code: 3072, err: new(apistatus.ContainerNotFound), constErr: 
apistatus.ErrContainerNotFound}, + } { + st := &protostatus.Status{Code: tc.code} + srv.respondWithStatus(st) + + res, err := c.ContainerGet(ctx, anyID, anyValidOpts) + require.Zero(t, res) + require.ErrorAs(t, err, &tc.err) + require.ErrorIs(t, err, tc.constErr) + + for _, tcCustom := range tc.custom { + st.Message = tcCustom.msg + if tcCustom.detail != nil { + st.Details = []*protostatus.Status_Detail{tcCustom.detail} + } + srv.respondWithStatus(st) + + _, err := c.ContainerGet(ctx, anyID, anyValidOpts) + require.ErrorAs(t, err, &tc.err) + tcCustom.assert(t, tc.err) + } + } + }) + }) + t.Run("sign request failure", func(t *testing.T) { + testSignRequestFailure(t, func(c *Client) error { + _, err := c.ContainerGet(ctx, anyID, anyValidOpts) + return err + }) + }) + t.Run("transport failure", func(t *testing.T) { + testTransportFailure(t, newTestGetContainerServer, newTestContainerClient, func(c *Client) error { + _, err := c.ContainerGet(ctx, anyID, anyValidOpts) + return err + }) + }) + t.Run("response message decoding failure", func(t *testing.T) { + testUnaryRPCResponseTypeMismatch(t, "container.ContainerService", "Get", func(c *Client) error { + _, err := c.ContainerGet(ctx, anyID, anyValidOpts) + return err + }) + }) + t.Run("invalid response verification header", func(t *testing.T) { + testInvalidResponseSignatures(t, newTestGetContainerServer, newTestContainerClient, func(c *Client) error { + _, err := c.ContainerGet(ctx, anyID, anyValidOpts) + return err + }) + }) + t.Run("invalid response body", func(t *testing.T) { + for _, tc := range []struct { + name string + body *protocontainer.GetResponse_Body + cnrFunc func() *protocontainer.Container + assertErr func(testing.TB, error) + }{ + {name: "missing", body: nil, assertErr: func(t testing.TB, err error) { + require.EqualError(t, err, "missing container in response") + // TODO: worth clarifying that body is completely missing? 
+ }}, + {name: "empty", body: new(protocontainer.GetResponse_Body), assertErr: func(t testing.TB, err error) { + require.EqualError(t, err, "missing container in response") + }}, + {name: "container/missing version", cnrFunc: func() *protocontainer.Container { + m := proto.Clone(anyValidFullProtoContainer).(*protocontainer.Container) + m.Version = nil + return m + }, assertErr: func(t testing.TB, err error) { + require.EqualError(t, err, "invalid container in response: missing version") + }}, + {name: "container/missing owner", cnrFunc: func() *protocontainer.Container { + m := proto.Clone(anyValidFullProtoContainer).(*protocontainer.Container) + m.OwnerId = nil + return m + }, assertErr: func(t testing.TB, err error) { + require.EqualError(t, err, "invalid container in response: missing owner") + }}, + {name: "container/owner/empty", cnrFunc: func() *protocontainer.Container { + m := proto.Clone(anyValidFullProtoContainer).(*protocontainer.Container) + m.OwnerId = new(protorefs.OwnerID) + return m + }, assertErr: func(t testing.TB, err error) { + require.EqualError(t, err, "invalid container in response: invalid owner: invalid length 0, expected 25") + }}, + {name: "container/owner/undersize", cnrFunc: func() *protocontainer.Container { + m := proto.Clone(anyValidFullProtoContainer).(*protocontainer.Container) + m.OwnerId.Value = m.OwnerId.Value[:24] + return m + }, assertErr: func(t testing.TB, err error) { + require.EqualError(t, err, "invalid container in response: invalid owner: invalid length 24, expected 25") + }}, + {name: "container/owner/oversize", cnrFunc: func() *protocontainer.Container { + m := proto.Clone(anyValidFullProtoContainer).(*protocontainer.Container) + m.OwnerId.Value = append(m.OwnerId.Value, 1) + return m + }, assertErr: func(t testing.TB, err error) { + require.EqualError(t, err, "invalid container in response: invalid owner: invalid length 26, expected 25") + }}, + {name: "container/owner/wrong prefix", cnrFunc: func() *protocontainer.Container { + m := proto.Clone(anyValidFullProtoContainer).(*protocontainer.Container) + m.OwnerId.Value[0] = 0x42 + h := sha256.Sum256(m.OwnerId.Value[:21]) + hh := sha256.Sum256(h[:]) + copy(m.OwnerId.Value[21:], hh[:]) + return m + }, assertErr: func(t testing.TB, err error) { + require.EqualError(t, err, "invalid container in response: invalid owner: invalid prefix byte 0x42, expected 0x35") + }}, + {name: "container/owner/wrong checksum", cnrFunc: func() *protocontainer.Container { + m := proto.Clone(anyValidFullProtoContainer).(*protocontainer.Container) + m.OwnerId.Value[24]++ + return m + }, assertErr: func(t testing.TB, err error) { + require.EqualError(t, err, "invalid container in response: invalid owner: checksum mismatch") + }}, + {name: "container/owner/zero", cnrFunc: func() *protocontainer.Container { + m := proto.Clone(anyValidFullProtoContainer).(*protocontainer.Container) + m.OwnerId.Value = make([]byte, 25) + return m + }, assertErr: func(t testing.TB, err error) { + // TODO: better to return user.ErrZero in this case + require.ErrorContains(t, err, "invalid container in response: invalid owner: invalid prefix byte 0x0, expected 0x35") + }}, + {name: "container/missing nonce", cnrFunc: func() *protocontainer.Container { + m := proto.Clone(anyValidFullProtoContainer).(*protocontainer.Container) + m.Nonce = nil + return m + }, assertErr: func(t testing.TB, err error) { + require.EqualError(t, err, "invalid container in response: missing nonce") + }}, + {name: "container/nonce/undersize", cnrFunc: func() 
*protocontainer.Container { + m := proto.Clone(anyValidFullProtoContainer).(*protocontainer.Container) + m.Nonce = m.Nonce[:15] + return m + }, assertErr: func(t testing.TB, err error) { + require.EqualError(t, err, "invalid container in response: invalid nonce: invalid UUID (got 15 bytes)") + }}, + {name: "container/nonce/oversize", cnrFunc: func() *protocontainer.Container { + m := proto.Clone(anyValidFullProtoContainer).(*protocontainer.Container) + m.Nonce = append(m.Nonce, 1) + return m + }, assertErr: func(t testing.TB, err error) { + require.EqualError(t, err, "invalid container in response: invalid nonce: invalid UUID (got 17 bytes)") + }}, + {name: "container/nonce/wrong version", cnrFunc: func() *protocontainer.Container { + m := proto.Clone(anyValidFullProtoContainer).(*protocontainer.Container) + m.Nonce[6] = 3 << 4 + return m + }, assertErr: func(t testing.TB, err error) { + require.EqualError(t, err, "invalid container in response: invalid nonce UUID version 3") + }}, + {name: "container/attributes/empty key", cnrFunc: func() *protocontainer.Container { + m := proto.Clone(anyValidFullProtoContainer).(*protocontainer.Container) + m.Attributes[1].Key = "" + return m + }, assertErr: func(t testing.TB, err error) { + require.EqualError(t, err, "invalid container in response: empty attribute key") + }}, + {name: "container/attributes/empty value", cnrFunc: func() *protocontainer.Container { + m := proto.Clone(anyValidFullProtoContainer).(*protocontainer.Container) + m.Attributes[1].Value = "" + return m + }, assertErr: func(t testing.TB, err error) { + require.EqualError(t, err, "invalid container in response: empty attribute value k2") + }}, + {name: "container/attributes/duplicated", cnrFunc: func() *protocontainer.Container { + m := proto.Clone(anyValidFullProtoContainer).(*protocontainer.Container) + m.Attributes[2].Key = m.Attributes[0].Key + return m + }, assertErr: func(t testing.TB, err error) { + require.EqualError(t, err, "invalid container in response: duplicated attribute k1") + }}, + {name: "container/attributes/timestamp", cnrFunc: func() *protocontainer.Container { + m := proto.Clone(anyValidFullProtoContainer).(*protocontainer.Container) + m.Attributes[3].Value = "foo" + return m + }, assertErr: func(t testing.TB, err error) { + require.EqualError(t, err, `invalid container in response: invalid attribute value Timestamp: foo (strconv.ParseInt: parsing "foo": invalid syntax)`) + }}, + {name: "container/missing policy", cnrFunc: func() *protocontainer.Container { + m := proto.Clone(anyValidFullProtoContainer).(*protocontainer.Container) + m.PlacementPolicy = nil + return m + }, assertErr: func(t testing.TB, err error) { + require.EqualError(t, err, `invalid container in response: missing placement policy`) + }}, + {name: "container/policy/missing replicas", cnrFunc: func() *protocontainer.Container { + m := proto.Clone(anyValidFullProtoContainer).(*protocontainer.Container) + m.PlacementPolicy.Replicas = nil + return m + }, assertErr: func(t testing.TB, err error) { + require.EqualError(t, err, `invalid container in response: invalid placement policy: missing replicas`) + }}, + } { + t.Run(tc.name, func(t *testing.T) { + srv := newTestGetContainerServer() + if tc.cnrFunc != nil { + srv.respondWithContainer(tc.cnrFunc()) + } else { + srv.respondWithBody(func() *protocontainer.GetResponse_Body { return tc.body }) + } + c := newTestContainerClient(t, srv) + + _, err := c.ContainerGet(ctx, anyID, anyValidOpts) + tc.assertErr(t, err) + }) + } + }) + t.Run("response 
callback", func(t *testing.T) { + testResponseCallback(t, newTestGetContainerServer, newDefaultContainerService, func(c *Client) error { + _, err := c.ContainerGet(ctx, anyID, anyValidOpts) + return err + }) + }) + t.Run("exec statistics", func(t *testing.T) { + testStatistic(t, newTestGetContainerServer, newDefaultContainerService, stat.MethodContainerGet, + nil, nil, func(c *Client) error { + _, err := c.ContainerGet(ctx, anyID, anyValidOpts) + return err }, + ) + }) +} + +func TestClient_ContainerList(t *testing.T) { + ctx := context.Background() + var anyValidOpts PrmContainerList + anyUser := usertest.ID() + + t.Run("exact in-out", func(t *testing.T) { + /* + This test is dedicated for cases when user input results in sending a certain + request to the server and receiving a specific response to it. For user input + errors, transport, client internals, etc. see/add other tests. + */ + t.Run("default", func(t *testing.T) { + t.Run("full", func(t *testing.T) { + srv := newTestListContainersServer() + srv.checkOwner(anyUser) + ids := make([]*protorefs.ContainerID, len(anyValidProtoContainerIDs)) + for i := range anyValidProtoContainerIDs { + ids[i] = proto.Clone(anyValidProtoContainerIDs[i]).(*protorefs.ContainerID) + } + srv.respondWithIDs(ids) + c := newTestContainerClient(t, srv) + + res, err := c.ContainerList(ctx, anyUser, PrmContainerList{}) + require.NoError(t, err) + require.Len(t, res, len(ids)) + for i := range res { + require.EqualValues(t, ids[i].Value, res[i]) + } + }) + t.Run("min", func(t *testing.T) { + srv := newTestListContainersServer() + for i, fn := range []func(*testListContainersServer){ + func(srv *testListContainersServer) { + srv.respondWithBody(func() *protocontainer.ListResponse_Body { return nil }) + }, + func(srv *testListContainersServer) { + srv.respondWithBody(func() *protocontainer.ListResponse_Body { return new(protocontainer.ListResponse_Body) }) + }, + func(srv *testListContainersServer) { + srv.respondWithIDs(nil) + }, + } { + fn(srv) + c := newTestContainerClient(t, srv) + + res, err := c.ContainerList(ctx, anyUser, PrmContainerList{}) + require.NoError(t, err, i) + require.Empty(t, res, i) + } + }) + }) + t.Run("options", func(t *testing.T) { + t.Run("X-headers", func(t *testing.T) { + xhdrs := []string{ + "x-key1", "x-val1", + "x-key2", "x-val2", + } + srv := newTestListContainersServer() + srv.checkRequestXHeaders(xhdrs) + c := newTestContainerClient(t, srv) + + opts := anyValidOpts + opts.WithXHeaders(xhdrs...) 
+ _, err := c.ContainerList(ctx, anyUser, opts) + require.NoError(t, err) + }) + }) + t.Run("statuses", func(t *testing.T) { + srv := newTestListContainersServer() + c := newTestContainerClient(t, srv) + + type customStatusTestcase struct { + msg string + detail *protostatus.Status_Detail + assert func(testing.TB, error) + } + for _, tc := range []struct { + code uint32 + err error + constErr error + custom []customStatusTestcase + }{ + // TODO: use const codes after transition to current module's proto lib + {code: 1024, err: new(apistatus.ServerInternal), constErr: apistatus.ErrServerInternal, custom: []customStatusTestcase{ + {msg: "some server failure", assert: func(t testing.TB, err error) { + var e *apistatus.ServerInternal + require.ErrorAs(t, err, &e) + require.Equal(t, "some server failure", e.Message()) + }}, + }}, + {code: 1025, err: new(apistatus.WrongMagicNumber), constErr: apistatus.ErrWrongMagicNumber, custom: []customStatusTestcase{ + {assert: func(t testing.TB, err error) { + var e *apistatus.WrongMagicNumber + require.ErrorAs(t, err, &e) + _, ok := e.CorrectMagic() + require.Zero(t, ok) + }}, + { + detail: &protostatus.Status_Detail{Id: 0, Value: []byte{140, 15, 162, 245, 219, 236, 37, 191}}, + assert: func(t testing.TB, err error) { + var e *apistatus.WrongMagicNumber + require.ErrorAs(t, err, &e) + magic, ok := e.CorrectMagic() + require.EqualValues(t, 1, ok) + require.EqualValues(t, uint64(10092464466800944575), magic) + }, + }, + { + detail: &protostatus.Status_Detail{Id: 0, Value: []byte{1, 2, 3}}, + assert: func(t testing.TB, err error) { + var e *apistatus.WrongMagicNumber + require.ErrorAs(t, err, &e) + _, ok := e.CorrectMagic() + require.EqualValues(t, -1, ok) + }, + }, + }}, + {code: 1026, err: new(apistatus.SignatureVerification), constErr: apistatus.ErrSignatureVerification, custom: []customStatusTestcase{ + {msg: "invalid request signature", assert: func(t testing.TB, err error) { + var e *apistatus.SignatureVerification + require.ErrorAs(t, err, &e) + require.Equal(t, "invalid request signature", e.Message()) + }}, + }}, + {code: 1027, err: new(apistatus.NodeUnderMaintenance), constErr: apistatus.ErrNodeUnderMaintenance, custom: []customStatusTestcase{ + {msg: "node is under maintenance", assert: func(t testing.TB, err error) { + var e *apistatus.NodeUnderMaintenance + require.ErrorAs(t, err, &e) + require.Equal(t, "node is under maintenance", e.Message()) + }}, + }}, + } { + st := &protostatus.Status{Code: tc.code} + srv.respondWithStatus(st) + + res, err := c.ContainerList(ctx, anyUser, anyValidOpts) + require.Zero(t, res) + require.ErrorAs(t, err, &tc.err) + require.ErrorIs(t, err, tc.constErr) + + for _, tcCustom := range tc.custom { + st.Message = tcCustom.msg + if tcCustom.detail != nil { + st.Details = []*protostatus.Status_Detail{tcCustom.detail} + } + srv.respondWithStatus(st) + + _, err := c.ContainerList(ctx, anyUser, anyValidOpts) + require.ErrorAs(t, err, &tc.err) + tcCustom.assert(t, tc.err) + } + } + }) + }) + t.Run("sign request failure", func(t *testing.T) { + testSignRequestFailure(t, func(c *Client) error { + _, err := c.ContainerList(ctx, anyUser, anyValidOpts) + return err + }) + }) + t.Run("transport failure", func(t *testing.T) { + testTransportFailure(t, newTestListContainersServer, newTestContainerClient, func(c *Client) error { + _, err := c.ContainerList(ctx, anyUser, anyValidOpts) + return err + }) + }) + t.Run("response message decoding failure", func(t *testing.T) { + testUnaryRPCResponseTypeMismatch(t, 
"container.ContainerService", "List", func(c *Client) error { + _, err := c.ContainerList(ctx, anyUser, anyValidOpts) + return err + }) + }) + t.Run("invalid response verification header", func(t *testing.T) { + testInvalidResponseSignatures(t, newTestListContainersServer, newTestContainerClient, func(c *Client) error { + _, err := c.ContainerList(ctx, anyUser, anyValidOpts) + return err + }) + }) + t.Run("invalid response body", func(t *testing.T) { + for _, tc := range []struct { + name string + ids []*protorefs.ContainerID + assertErr func(testing.TB, error) + }{ + {name: "ids/empty", ids: []*protorefs.ContainerID{ + {Value: anyValidProtoContainerIDs[0].Value}, + nil, + {Value: anyValidProtoContainerIDs[2].Value}, + }, assertErr: func(t testing.TB, err error) { + require.EqualError(t, err, "invalid ID in the response: invalid length 0") + }}, + {name: "ids/undersize", ids: []*protorefs.ContainerID{ + {Value: anyValidProtoContainerIDs[0].Value}, + {Value: anyValidProtoContainerIDs[1].Value[:31]}, + {Value: anyValidProtoContainerIDs[2].Value}, + }, assertErr: func(t testing.TB, err error) { + require.EqualError(t, err, "invalid ID in the response: invalid length 31") + }}, + {name: "ids/oversize", ids: []*protorefs.ContainerID{ + {Value: anyValidProtoContainerIDs[0].Value}, + {Value: append(anyValidProtoContainerIDs[1].Value, 1)}, + {Value: anyValidProtoContainerIDs[2].Value}, + }, assertErr: func(t testing.TB, err error) { + require.EqualError(t, err, "invalid ID in the response: invalid length 33") + }}, + {name: "ids/zero", ids: []*protorefs.ContainerID{ + {Value: anyValidProtoContainerIDs[0].Value}, + {Value: make([]byte, 32)}, + {Value: anyValidProtoContainerIDs[2].Value}, + }, assertErr: func(t testing.TB, err error) { + require.ErrorIs(t, err, cid.ErrZero) + require.ErrorContains(t, err, "invalid ID in the response") + }}, + } { + t.Run(tc.name, func(t *testing.T) { + srv := newTestListContainersServer() + srv.respondWithIDs(tc.ids) + c := newTestContainerClient(t, srv) + + _, err := c.ContainerList(ctx, anyUser, anyValidOpts) + tc.assertErr(t, err) + }) } + }) + t.Run("response callback", func(t *testing.T) { + testResponseCallback(t, newTestListContainersServer, newDefaultContainerService, func(c *Client) error { + _, err := c.ContainerList(ctx, anyUser, anyValidOpts) + return err + }) + }) + t.Run("exec statistics", func(t *testing.T) { + testStatistic(t, newTestListContainersServer, newDefaultContainerService, stat.MethodContainerList, + nil, nil, func(c *Client) error { + _, err := c.ContainerList(ctx, anyUser, anyValidOpts) + return err + }, + ) + }) +} + +func TestClient_ContainerDelete(t *testing.T) { + c := newClient(t) + ctx := context.Background() + var anyValidOpts PrmContainerDelete + anyValidSigner := neofscryptotest.Signer().RFC6979 + anyID := cidtest.ID() + + t.Run("invalid user input", func(t *testing.T) { + t.Run("missing signer", func(t *testing.T) { + err := c.ContainerDelete(ctx, anyID, nil, anyValidOpts) + require.ErrorIs(t, err, ErrMissingSigner) + }) + }) + t.Run("sign ID failure", func(t *testing.T) { + t.Run("wrong scheme", func(t *testing.T) { + err := c.ContainerDelete(ctx, anyID, neofsecdsa.Signer(neofscryptotest.ECDSAPrivateKey()), anyValidOpts) + // TODO: consider returning 'calculate ID signature' to distinguish from the request signatures + // FIXME: currently unchecked and request attempt is done. 
Better to pre-check like Put does + t.Skip() + require.EqualError(t, err, "calculate signature: incorrect signer: expected ECDSA_DETERMINISTIC_SHA256 scheme") + }) + t.Run("signer failure", func(t *testing.T) { + err := c.ContainerDelete(ctx, anyID, neofscryptotest.FailSigner(neofscryptotest.Signer()), anyValidOpts) + require.ErrorContains(t, err, "calculate signature") + }) + }) + t.Run("exact in-out", func(t *testing.T) { + /* + This test is dedicated for cases when user input results in sending a certain + request to the server and receiving a specific response to it. For user input + errors, transport, client internals, etc. see/add other tests. + */ + t.Run("default", func(t *testing.T) { + signer := neofscryptotest.Signer() + srv := newTestDeleteContainerServer() + srv.checkRequestContainerID(anyID) + srv.checkRequestDataSignerKey(signer.ECDSAPrivateKey) + c := newTestContainerClient(t, srv) + + err := c.ContainerDelete(ctx, anyID, signer.RFC6979, PrmContainerDelete{}) + require.NoError(t, err) + }) + t.Run("options", func(t *testing.T) { + t.Run("precalculated container signature", func(t *testing.T) { + var sig neofscrypto.Signature + sig.SetPublicKeyBytes([]byte("any public key")) + sig.SetValue([]byte("any value")) + srv := newTestDeleteContainerServer() + srv.checkRequestDataSignature(sig) + c := newTestContainerClient(t, srv) + + opts := anyValidOpts + opts.AttachSignature(sig) + err := c.ContainerDelete(ctx, anyID, anyValidSigner, opts) + require.NoError(t, err) + }) + t.Run("X-headers", func(t *testing.T) { + xhdrs := []string{ + "x-key1", "x-val1", + "x-key2", "x-val2", + } + srv := newTestDeleteContainerServer() + srv.checkRequestXHeaders(xhdrs) + c := newTestContainerClient(t, srv) + + opts := anyValidOpts + opts.WithXHeaders(xhdrs...) 
+ err := c.ContainerDelete(ctx, anyID, anyValidSigner, opts) + require.NoError(t, err) + }) + t.Run("session token", func(t *testing.T) { + st := sessiontest.ContainerSigned(usertest.User()) + srv := newTestDeleteContainerServer() + srv.checkRequestSessionToken(st) + c := newTestContainerClient(t, srv) + + opts := anyValidOpts + opts.WithinSession(st) + err := c.ContainerDelete(ctx, anyID, anyValidSigner, opts) + require.NoError(t, err) + }) + }) + t.Run("statuses", func(t *testing.T) { + srv := newTestDeleteContainerServer() + c := newTestContainerClient(t, srv) + + type customStatusTestcase struct { + msg string + detail *protostatus.Status_Detail + assert func(testing.TB, error) + } + for _, tc := range []struct { + code uint32 + err error + constErr error + custom []customStatusTestcase + }{ + // TODO: use const codes after transition to current module's proto lib + {code: 1024, err: new(apistatus.ServerInternal), constErr: apistatus.ErrServerInternal, custom: []customStatusTestcase{ + {msg: "some server failure", assert: func(t testing.TB, err error) { + var e *apistatus.ServerInternal + require.ErrorAs(t, err, &e) + require.Equal(t, "some server failure", e.Message()) + }}, + }}, + {code: 1025, err: new(apistatus.WrongMagicNumber), constErr: apistatus.ErrWrongMagicNumber, custom: []customStatusTestcase{ + {assert: func(t testing.TB, err error) { + var e *apistatus.WrongMagicNumber + require.ErrorAs(t, err, &e) + _, ok := e.CorrectMagic() + require.Zero(t, ok) + }}, + { + detail: &protostatus.Status_Detail{Id: 0, Value: []byte{140, 15, 162, 245, 219, 236, 37, 191}}, + assert: func(t testing.TB, err error) { + var e *apistatus.WrongMagicNumber + require.ErrorAs(t, err, &e) + magic, ok := e.CorrectMagic() + require.EqualValues(t, 1, ok) + require.EqualValues(t, uint64(10092464466800944575), magic) + }, + }, + { + detail: &protostatus.Status_Detail{Id: 0, Value: []byte{1, 2, 3}}, + assert: func(t testing.TB, err error) { + var e *apistatus.WrongMagicNumber + require.ErrorAs(t, err, &e) + _, ok := e.CorrectMagic() + require.EqualValues(t, -1, ok) + }, + }, + }}, + {code: 1026, err: new(apistatus.SignatureVerification), constErr: apistatus.ErrSignatureVerification, custom: []customStatusTestcase{ + {msg: "invalid request signature", assert: func(t testing.TB, err error) { + var e *apistatus.SignatureVerification + require.ErrorAs(t, err, &e) + require.Equal(t, "invalid request signature", e.Message()) + }}, + }}, + {code: 1027, err: new(apistatus.NodeUnderMaintenance), constErr: apistatus.ErrNodeUnderMaintenance, custom: []customStatusTestcase{ + {msg: "node is under maintenance", assert: func(t testing.TB, err error) { + var e *apistatus.NodeUnderMaintenance + require.ErrorAs(t, err, &e) + require.Equal(t, "node is under maintenance", e.Message()) + }}, + }}, + } { + st := &protostatus.Status{Code: tc.code} + srv.respondWithStatus(st) + + err := c.ContainerDelete(ctx, anyID, anyValidSigner, anyValidOpts) + require.ErrorAs(t, err, &tc.err) + require.ErrorIs(t, err, tc.constErr) + + for _, tcCustom := range tc.custom { + st.Message = tcCustom.msg + if tcCustom.detail != nil { + st.Details = []*protostatus.Status_Detail{tcCustom.detail} + } + srv.respondWithStatus(st) - for _, test := range tt { - t.Run(test.name, func(t *testing.T) { - require.ErrorIs(t, test.methodCall(), ErrMissingSigner) + err := c.ContainerDelete(ctx, anyID, anyValidSigner, anyValidOpts) + require.ErrorAs(t, err, &tc.err) + tcCustom.assert(t, tc.err) + } + } + }) + }) + t.Run("sign request failure", func(t *testing.T) { 
+ testSignRequestFailure(t, func(c *Client) error { + return c.ContainerDelete(ctx, anyID, anyValidSigner, anyValidOpts) + }) + }) + t.Run("transport failure", func(t *testing.T) { + testTransportFailure(t, newTestDeleteContainerServer, newTestContainerClient, func(c *Client) error { + return c.ContainerDelete(ctx, anyID, anyValidSigner, anyValidOpts) + }) + }) + t.Run("response message decoding failure", func(t *testing.T) { + testUnaryRPCResponseTypeMismatch(t, "container.ContainerService", "Delete", func(c *Client) error { + return c.ContainerDelete(ctx, anyID, anyValidSigner, anyValidOpts) + }) + }) + t.Run("invalid response verification header", func(t *testing.T) { + testInvalidResponseSignatures(t, newTestDeleteContainerServer, newTestContainerClient, func(c *Client) error { + return c.ContainerDelete(ctx, anyID, anyValidSigner, anyValidOpts) + }) + }) + t.Run("response callback", func(t *testing.T) { + testResponseCallback(t, newTestDeleteContainerServer, newDefaultContainerService, func(c *Client) error { + return c.ContainerDelete(ctx, anyID, anyValidSigner, anyValidOpts) + }) + }) + t.Run("exec statistics", func(t *testing.T) { + testStatistic(t, newTestDeleteContainerServer, newDefaultContainerService, stat.MethodContainerDelete, + []testedClientOp{func(c *Client) error { + return c.ContainerDelete(ctx, anyID, nil, anyValidOpts) + }}, []testedClientOp{func(c *Client) error { + return c.ContainerDelete(ctx, anyID, neofscryptotest.FailSigner(anyValidSigner), anyValidOpts) + }}, func(c *Client) error { + return c.ContainerDelete(ctx, anyID, anyValidSigner, anyValidOpts) + }, + ) + }) +} + +func TestClient_ContainerEACL(t *testing.T) { + ctx := context.Background() + var anyValidOpts PrmContainerEACL + anyID := cidtest.ID() + + t.Run("exact in-out", func(t *testing.T) { + /* + This test is dedicated for cases when user input results in sending a certain + request to the server and receiving a specific response to it. For user input + errors, transport, client internals, etc. see/add other tests. 
+ */ + t.Run("default", func(t *testing.T) { + t.Run("full", func(t *testing.T) { + srv := newTestGetEACLServer() + srv.checkRequestContainerID(anyID) + srv.respondWithEACL(proto.Clone(anyValidFullEACL).(*protoacl.EACLTable)) + c := newTestContainerClient(t, srv) + + eACL, err := c.ContainerEACL(ctx, anyID, PrmContainerEACL{}) + require.NoError(t, err) + ver := eACL.Version() + require.EqualValues(t, anyValidFullEACL.Version.Major, ver.Major()) + require.EqualValues(t, anyValidFullEACL.Version.Minor, ver.Minor()) + cnr := eACL.GetCID() + require.EqualValues(t, anyValidFullEACL.ContainerId.Value, cnr) + + rs := eACL.Records() + require.Len(t, rs, 11) + for _, r := range rs[:9] { + require.Empty(t, r.Filters()) + require.Empty(t, r.Targets()) + } + require.Empty(t, rs[9].Targets()) + require.Empty(t, rs[10].Filters()) + for i, a := range []eacl.Operation{ + 0, + eacl.OperationGet, + eacl.OperationHead, + eacl.OperationPut, + eacl.OperationDelete, + eacl.OperationSearch, + eacl.OperationRange, + eacl.OperationRangeHash, + // FIXME: uncomment after https://github.com/nspcc-dev/neofs-sdk-go/issues/606 + // math.MaxInt32, + } { + require.Equal(t, a, rs[i].Operation()) + } + for i, a := range []eacl.Action{ + 0, + eacl.ActionAllow, + eacl.ActionDeny, + // FIXME: uncomment after https://github.com/nspcc-dev/neofs-sdk-go/issues/606 + // math.MaxInt32, + } { + require.Equal(t, a, rs[i].Action()) + } + + fs := rs[9].Filters() + require.Len(t, fs, 9) + for i, f := range fs { + mf := anyValidFullEACL.Records[9].Filters[i] + require.Equal(t, mf.Key, f.Key()) + require.Equal(t, mf.Value, f.Value()) + } + for i, typ := range []eacl.FilterHeaderType{ + 0, + eacl.HeaderFromRequest, + eacl.HeaderFromObject, + eacl.HeaderFromService, + // FIXME: uncomment after https://github.com/nspcc-dev/neofs-sdk-go/issues/606 + // math.MaxInt32, + } { + require.Equal(t, typ, fs[i].From()) + } + for i, m := range []eacl.Match{ + 0, + eacl.MatchStringEqual, + eacl.MatchStringNotEqual, + eacl.MatchNotPresent, + eacl.MatchNumGT, + eacl.MatchNumGE, + eacl.MatchNumLT, + eacl.MatchNumLE, + // FIXME: uncomment after https://github.com/nspcc-dev/neofs-sdk-go/issues/606 + // math.MaxInt32, + } { + require.Equal(t, m, fs[i].Matcher()) + } + + ts := rs[10].Targets() + require.Len(t, ts, 5) + for i, tgt := range ts { + mt := anyValidFullEACL.Records[10].Targets[i] + require.Equal(t, mt.Keys, tgt.RawSubjects()) + } + for i, r := range []eacl.Role{ + 0, + eacl.RoleUser, + eacl.RoleSystem, + eacl.RoleOthers, + // FIXME: uncomment after https://github.com/nspcc-dev/neofs-sdk-go/issues/606 + // math.MaxInt32, + } { + require.Equal(t, r, ts[i].Role()) + } + }) + t.Run("min", func(t *testing.T) { + srv := newTestGetEACLServer() + srv.respondWithEACL(proto.Clone(anyValidMinEACL).(*protoacl.EACLTable)) + c := newTestContainerClient(t, srv) + + eACL, err := c.ContainerEACL(ctx, anyID, PrmContainerEACL{}) + require.NoError(t, err) + require.Zero(t, eACL.Version()) + require.True(t, eACL.GetCID().IsZero()) + require.Empty(t, eACL.Records()) + }) + }) + t.Run("options", func(t *testing.T) { + t.Run("X-headers", func(t *testing.T) { + xhdrs := []string{ + "x-key1", "x-val1", + "x-key2", "x-val2", + } + srv := newTestGetEACLServer() + srv.checkRequestXHeaders(xhdrs) + c := newTestContainerClient(t, srv) + + opts := anyValidOpts + opts.WithXHeaders(xhdrs...) 
+ _, err := c.ContainerEACL(ctx, anyID, opts) + require.NoError(t, err) + }) + }) + t.Run("statuses", func(t *testing.T) { + srv := newTestGetEACLServer() + c := newTestContainerClient(t, srv) + + type customStatusTestcase struct { + msg string + detail *protostatus.Status_Detail + assert func(testing.TB, error) + } + for _, tc := range []struct { + code uint32 + err error + constErr error + custom []customStatusTestcase + }{ + // TODO: use const codes after transition to current module's proto lib + {code: 1024, err: new(apistatus.ServerInternal), constErr: apistatus.ErrServerInternal, custom: []customStatusTestcase{ + {msg: "some server failure", assert: func(t testing.TB, err error) { + var e *apistatus.ServerInternal + require.ErrorAs(t, err, &e) + require.Equal(t, "some server failure", e.Message()) + }}, + }}, + {code: 1025, err: new(apistatus.WrongMagicNumber), constErr: apistatus.ErrWrongMagicNumber, custom: []customStatusTestcase{ + {assert: func(t testing.TB, err error) { + var e *apistatus.WrongMagicNumber + require.ErrorAs(t, err, &e) + _, ok := e.CorrectMagic() + require.Zero(t, ok) + }}, + { + detail: &protostatus.Status_Detail{Id: 0, Value: []byte{140, 15, 162, 245, 219, 236, 37, 191}}, + assert: func(t testing.TB, err error) { + var e *apistatus.WrongMagicNumber + require.ErrorAs(t, err, &e) + magic, ok := e.CorrectMagic() + require.EqualValues(t, 1, ok) + require.EqualValues(t, uint64(10092464466800944575), magic) + }, + }, + { + detail: &protostatus.Status_Detail{Id: 0, Value: []byte{1, 2, 3}}, + assert: func(t testing.TB, err error) { + var e *apistatus.WrongMagicNumber + require.ErrorAs(t, err, &e) + _, ok := e.CorrectMagic() + require.EqualValues(t, -1, ok) + }, + }, + }}, + {code: 1026, err: new(apistatus.SignatureVerification), constErr: apistatus.ErrSignatureVerification, custom: []customStatusTestcase{ + {msg: "invalid request signature", assert: func(t testing.TB, err error) { + var e *apistatus.SignatureVerification + require.ErrorAs(t, err, &e) + require.Equal(t, "invalid request signature", e.Message()) + }}, + }}, + {code: 1027, err: new(apistatus.NodeUnderMaintenance), constErr: apistatus.ErrNodeUnderMaintenance, custom: []customStatusTestcase{ + {msg: "node is under maintenance", assert: func(t testing.TB, err error) { + var e *apistatus.NodeUnderMaintenance + require.ErrorAs(t, err, &e) + require.Equal(t, "node is under maintenance", e.Message()) + }}, + }}, + {code: 3072, err: new(apistatus.ContainerNotFound), constErr: apistatus.ErrContainerNotFound}, + {code: 3073, err: new(apistatus.EACLNotFound), constErr: apistatus.ErrEACLNotFound}, + } { + st := &protostatus.Status{Code: tc.code} + srv.respondWithStatus(st) + + res, err := c.ContainerEACL(ctx, anyID, anyValidOpts) + require.Zero(t, res) + require.ErrorAs(t, err, &tc.err) + require.ErrorIs(t, err, tc.constErr) + + for _, tcCustom := range tc.custom { + st.Message = tcCustom.msg + if tcCustom.detail != nil { + st.Details = []*protostatus.Status_Detail{tcCustom.detail} + } + srv.respondWithStatus(st) + + _, err := c.ContainerEACL(ctx, anyID, anyValidOpts) + require.ErrorAs(t, err, &tc.err) + tcCustom.assert(t, tc.err) + } + } + }) + }) + t.Run("sign request failure", func(t *testing.T) { + testSignRequestFailure(t, func(c *Client) error { + _, err := c.ContainerEACL(ctx, anyID, anyValidOpts) + return err + }) + }) + t.Run("transport failure", func(t *testing.T) { + testTransportFailure(t, newTestGetEACLServer, newTestContainerClient, func(c *Client) error { + _, err := c.ContainerEACL(ctx, anyID, 
anyValidOpts) + return err + }) + }) + t.Run("response message decoding failure", func(t *testing.T) { + testUnaryRPCResponseTypeMismatch(t, "container.ContainerService", "GetExtendedACL", func(c *Client) error { + _, err := c.ContainerEACL(ctx, anyID, anyValidOpts) + return err + }) + }) + t.Run("invalid response verification header", func(t *testing.T) { + testInvalidResponseSignatures(t, newTestGetEACLServer, newTestContainerClient, func(c *Client) error { + _, err := c.ContainerEACL(ctx, anyID, anyValidOpts) + return err + }) + }) + t.Run("invalid response body", func(t *testing.T) { + for _, tc := range []struct { + name string + body *protocontainer.GetExtendedACLResponse_Body + eaclFunc func() *protoacl.EACLTable + assertErr func(testing.TB, error) + }{ + {name: "missing", body: nil, assertErr: func(t testing.TB, err error) { + require.ErrorIs(t, err, MissingResponseFieldErr{}) + require.EqualError(t, err, "missing eACL field in the response") + // TODO: worth clarifying that body is completely missing? + }}, + {name: "empty", body: new(protocontainer.GetExtendedACLResponse_Body), assertErr: func(t testing.TB, err error) { + require.ErrorIs(t, err, MissingResponseFieldErr{}) + require.EqualError(t, err, "missing eACL field in the response") + }}, + {name: "eacl/container/empty", eaclFunc: func() *protoacl.EACLTable { + m := proto.Clone(anyValidFullEACL).(*protoacl.EACLTable) + m.ContainerId = new(protorefs.ContainerID) + return m + }, assertErr: func(t testing.TB, err error) { + require.EqualError(t, err, "invalid eACL field in the response: invalid container ID: invalid length 0") + }}, + {name: "eacl/container/undersize", eaclFunc: func() *protoacl.EACLTable { + m := proto.Clone(anyValidFullEACL).(*protoacl.EACLTable) + m.ContainerId = &protorefs.ContainerID{Value: anyValidFullEACL.ContainerId.Value[:31]} + return m + }, assertErr: func(t testing.TB, err error) { + require.EqualError(t, err, "invalid eACL field in the response: invalid container ID: invalid length 31") + }}, + {name: "eacl/container/oversize", eaclFunc: func() *protoacl.EACLTable { + m := proto.Clone(anyValidFullEACL).(*protoacl.EACLTable) + m.ContainerId = &protorefs.ContainerID{Value: append(anyValidFullEACL.ContainerId.Value, 1)} + return m + }, assertErr: func(t testing.TB, err error) { + require.EqualError(t, err, "invalid eACL field in the response: invalid container ID: invalid length 33") + }}, + {name: "eacl/container/zero", eaclFunc: func() *protoacl.EACLTable { + m := proto.Clone(anyValidFullEACL).(*protoacl.EACLTable) + m.ContainerId = &protorefs.ContainerID{Value: make([]byte, 32)} + return m + }, assertErr: func(t testing.TB, err error) { + require.ErrorIs(t, err, cid.ErrZero) + require.ErrorContains(t, err, "invalid eACL field in the response") + }}, + } { + t.Run(tc.name, func(t *testing.T) { + srv := newTestGetEACLServer() + if tc.eaclFunc != nil { + srv.respondWithEACL(tc.eaclFunc()) + } else { + srv.respondWithBody(func() *protocontainer.GetExtendedACLResponse_Body { return tc.body }) + } + c := newTestContainerClient(t, srv) + + _, err := c.ContainerEACL(ctx, anyID, anyValidOpts) + tc.assertErr(t, err) }) } }) + t.Run("response callback", func(t *testing.T) { + testResponseCallback(t, newTestGetEACLServer, newDefaultContainerService, func(c *Client) error { + _, err := c.ContainerEACL(ctx, anyID, anyValidOpts) + return err + }) + }) + t.Run("exec statistics", func(t *testing.T) { + testStatistic(t, newTestGetEACLServer, newDefaultContainerService, stat.MethodContainerEACL, + nil, nil, func(c 
*Client) error { + _, err := c.ContainerEACL(ctx, anyID, anyValidOpts) + return err + }, + ) + }) +} + +func TestClient_ContainerSetEACL(t *testing.T) { + c := newClient(t) + ctx := context.Background() + var anyValidOpts PrmContainerSetEACL + anyValidSigner := usertest.User().RFC6979 + // TODO: use eacltest.Table() after https://github.com/nspcc-dev/neofs-sdk-go/issues/606 + anyValidEACL := eacl.NewTableForContainer(cidtest.ID(), []eacl.Record{ + eacl.ConstructRecord(eacl.ActionDeny, eacl.OperationPut, + []eacl.Target{ + eacl.NewTargetByRole(eacl.RoleOthers), + eacl.NewTargetByAccounts(usertest.IDs(3)), + }, + eacl.NewFilterObjectOwnerEquals(usertest.ID()), + eacl.NewObjectPropertyFilter("attr1", eacl.MatchStringEqual, "val1"), + ), + }) + + t.Run("invalid user input", func(t *testing.T) { + t.Run("missing signer", func(t *testing.T) { + err := c.ContainerSetEACL(ctx, anyValidEACL, nil, anyValidOpts) + require.ErrorIs(t, err, ErrMissingSigner) + }) + t.Run("missing container ID in eACL", func(t *testing.T) { + eACL := anyValidEACL + eACL.SetCID(cid.ID{}) + err := c.ContainerSetEACL(ctx, eACL, anyValidSigner, anyValidOpts) + require.ErrorIs(t, err, ErrMissingEACLContainer) + }) + }) + t.Run("sign container failure", func(t *testing.T) { + t.Run("wrong scheme", func(t *testing.T) { + err := c.ContainerSetEACL(ctx, anyValidEACL, user.NewAutoIDSigner(neofscryptotest.ECDSAPrivateKey()), anyValidOpts) + // TODO: consider returning 'calculate eACL signature' to distinguish from the request signatures + // FIXME: currently unchecked and request attempt is done. Better to pre-check like Put does + t.Skip() + require.EqualError(t, err, "calculate signature: incorrect signer: expected ECDSA_DETERMINISTIC_SHA256 scheme") + }) + t.Run("signer failure", func(t *testing.T) { + err := c.ContainerSetEACL(ctx, anyValidEACL, usertest.FailSigner(usertest.User()), anyValidOpts) + require.ErrorContains(t, err, "calculate signature") + }) + }) + t.Run("exact in-out", func(t *testing.T) { + /* + This test is dedicated for cases when user input results in sending a certain + request to the server and receiving a specific response to it. For user input + errors, transport, client internals, etc. see/add other tests. + */ + t.Run("default", func(t *testing.T) { + signer := usertest.User() + srv := newTestSetEACLServer() + srv.checkRequestEACL(anyValidEACL) + srv.checkRequestDataSignerKey(signer.ECDSAPrivateKey) + c := newTestContainerClient(t, srv) + + err := c.ContainerSetEACL(ctx, anyValidEACL, signer.RFC6979, PrmContainerSetEACL{}) + require.NoError(t, err) + }) + t.Run("options", func(t *testing.T) { + t.Run("precalculated container signature", func(t *testing.T) { + var sig neofscrypto.Signature + sig.SetPublicKeyBytes([]byte("any public key")) + sig.SetValue([]byte("any value")) + srv := newTestSetEACLServer() + srv.checkRequestDataSignature(sig) + c := newTestContainerClient(t, srv) + + opts := anyValidOpts + opts.AttachSignature(sig) + err := c.ContainerSetEACL(ctx, anyValidEACL, anyValidSigner, opts) + require.NoError(t, err) + }) + t.Run("X-headers", func(t *testing.T) { + xhdrs := []string{ + "x-key1", "x-val1", + "x-key2", "x-val2", + } + srv := newTestSetEACLServer() + srv.checkRequestXHeaders(xhdrs) + c := newTestContainerClient(t, srv) + + opts := anyValidOpts + opts.WithXHeaders(xhdrs...) 
+ err := c.ContainerSetEACL(ctx, anyValidEACL, anyValidSigner, opts) + require.NoError(t, err) + }) + t.Run("session token", func(t *testing.T) { + st := sessiontest.ContainerSigned(usertest.User()) + srv := newTestSetEACLServer() + srv.checkRequestSessionToken(st) + c := newTestContainerClient(t, srv) + + opts := anyValidOpts + opts.WithinSession(st) + err := c.ContainerSetEACL(ctx, anyValidEACL, anyValidSigner, opts) + require.NoError(t, err) + }) + }) + t.Run("statuses", func(t *testing.T) { + srv := newTestSetEACLServer() + c := newTestContainerClient(t, srv) + + type customStatusTestcase struct { + msg string + detail *protostatus.Status_Detail + assert func(testing.TB, error) + } + for _, tc := range []struct { + code uint32 + err error + constErr error + custom []customStatusTestcase + }{ + // TODO: use const codes after transition to current module's proto lib + {code: 1024, err: new(apistatus.ServerInternal), constErr: apistatus.ErrServerInternal, custom: []customStatusTestcase{ + {msg: "some server failure", assert: func(t testing.TB, err error) { + var e *apistatus.ServerInternal + require.ErrorAs(t, err, &e) + require.Equal(t, "some server failure", e.Message()) + }}, + }}, + {code: 1025, err: new(apistatus.WrongMagicNumber), constErr: apistatus.ErrWrongMagicNumber, custom: []customStatusTestcase{ + {assert: func(t testing.TB, err error) { + var e *apistatus.WrongMagicNumber + require.ErrorAs(t, err, &e) + _, ok := e.CorrectMagic() + require.Zero(t, ok) + }}, + { + detail: &protostatus.Status_Detail{Id: 0, Value: []byte{140, 15, 162, 245, 219, 236, 37, 191}}, + assert: func(t testing.TB, err error) { + var e *apistatus.WrongMagicNumber + require.ErrorAs(t, err, &e) + magic, ok := e.CorrectMagic() + require.EqualValues(t, 1, ok) + require.EqualValues(t, uint64(10092464466800944575), magic) + }, + }, + { + detail: &protostatus.Status_Detail{Id: 0, Value: []byte{1, 2, 3}}, + assert: func(t testing.TB, err error) { + var e *apistatus.WrongMagicNumber + require.ErrorAs(t, err, &e) + _, ok := e.CorrectMagic() + require.EqualValues(t, -1, ok) + }, + }, + }}, + {code: 1026, err: new(apistatus.SignatureVerification), constErr: apistatus.ErrSignatureVerification, custom: []customStatusTestcase{ + {msg: "invalid request signature", assert: func(t testing.TB, err error) { + var e *apistatus.SignatureVerification + require.ErrorAs(t, err, &e) + require.Equal(t, "invalid request signature", e.Message()) + }}, + }}, + {code: 1027, err: new(apistatus.NodeUnderMaintenance), constErr: apistatus.ErrNodeUnderMaintenance, custom: []customStatusTestcase{ + {msg: "node is under maintenance", assert: func(t testing.TB, err error) { + var e *apistatus.NodeUnderMaintenance + require.ErrorAs(t, err, &e) + require.Equal(t, "node is under maintenance", e.Message()) + }}, + }}, + } { + st := &protostatus.Status{Code: tc.code} + srv.respondWithStatus(st) + + err := c.ContainerSetEACL(ctx, anyValidEACL, anyValidSigner, anyValidOpts) + require.ErrorAs(t, err, &tc.err) + require.ErrorIs(t, err, tc.constErr) + + for _, tcCustom := range tc.custom { + st.Message = tcCustom.msg + if tcCustom.detail != nil { + st.Details = []*protostatus.Status_Detail{tcCustom.detail} + } + srv.respondWithStatus(st) + + err := c.ContainerSetEACL(ctx, anyValidEACL, anyValidSigner, anyValidOpts) + require.ErrorAs(t, err, &tc.err) + tcCustom.assert(t, tc.err) + } + } + }) + }) + t.Run("sign request failure", func(t *testing.T) { + testSignRequestFailure(t, func(c *Client) error { + return c.ContainerSetEACL(ctx, anyValidEACL, 
anyValidSigner, anyValidOpts) + }) + }) + t.Run("transport failure", func(t *testing.T) { + testTransportFailure(t, newTestSetEACLServer, newTestContainerClient, func(c *Client) error { + return c.ContainerSetEACL(ctx, anyValidEACL, anyValidSigner, anyValidOpts) + }) + }) + t.Run("response message decoding failure", func(t *testing.T) { + testUnaryRPCResponseTypeMismatch(t, "container.ContainerService", "SetExtendedACL", func(c *Client) error { + return c.ContainerSetEACL(ctx, anyValidEACL, anyValidSigner, anyValidOpts) + }) + }) + t.Run("invalid response verification header", func(t *testing.T) { + testInvalidResponseSignatures(t, newTestSetEACLServer, newTestContainerClient, func(c *Client) error { + return c.ContainerSetEACL(ctx, anyValidEACL, anyValidSigner, anyValidOpts) + }) + }) + t.Run("response callback", func(t *testing.T) { + testResponseCallback(t, newTestSetEACLServer, newDefaultContainerService, func(c *Client) error { + return c.ContainerSetEACL(ctx, anyValidEACL, anyValidSigner, anyValidOpts) + }) + }) + t.Run("exec statistics", func(t *testing.T) { + testStatistic(t, newTestSetEACLServer, newDefaultContainerService, stat.MethodContainerSetEACL, + []testedClientOp{func(c *Client) error { + return c.ContainerSetEACL(ctx, anyValidEACL, nil, anyValidOpts) + }}, []testedClientOp{func(c *Client) error { + return c.ContainerSetEACL(ctx, anyValidEACL, usertest.FailSigner(anyValidSigner), anyValidOpts) + }}, func(c *Client) error { + return c.ContainerSetEACL(ctx, anyValidEACL, anyValidSigner, anyValidOpts) + }, + ) + }) +} + +func TestClient_ContainerAnnounceUsedSpace(t *testing.T) { + c := newClient(t) + ctx := context.Background() + var anyValidOpts PrmAnnounceSpace + anyValidAnnouncements := []container.SizeEstimation{containertest.SizeEstimation(), containertest.SizeEstimation()} + + t.Run("invalid user input", func(t *testing.T) { + t.Run("missing announcements", func(t *testing.T) { + err := c.ContainerAnnounceUsedSpace(ctx, nil, anyValidOpts) + require.ErrorIs(t, err, ErrMissingAnnouncements) + err = c.ContainerAnnounceUsedSpace(ctx, []container.SizeEstimation{}, anyValidOpts) + require.ErrorIs(t, err, ErrMissingAnnouncements) + }) + }) + t.Run("exact in-out", func(t *testing.T) { + /* + This test is dedicated for cases when user input results in sending a certain + request to the server and receiving a specific response to it. For user input + errors, transport, client internals, etc. see/add other tests. + */ + t.Run("default", func(t *testing.T) { + srv := newTestAnnounceContainerSpaceServer() + srv.checkRequestAnnouncements(anyValidAnnouncements) + c := newTestContainerClient(t, srv) + + err := c.ContainerAnnounceUsedSpace(ctx, anyValidAnnouncements, PrmAnnounceSpace{}) + require.NoError(t, err) + }) + t.Run("options", func(t *testing.T) { + t.Run("X-headers", func(t *testing.T) { + xhdrs := []string{ + "x-key1", "x-val1", + "x-key2", "x-val2", + } + srv := newTestAnnounceContainerSpaceServer() + srv.checkRequestXHeaders(xhdrs) + c := newTestContainerClient(t, srv) + + opts := anyValidOpts + opts.WithXHeaders(xhdrs...) 
+ err := c.ContainerAnnounceUsedSpace(ctx, anyValidAnnouncements, opts) + require.NoError(t, err) + }) + }) + t.Run("statuses", func(t *testing.T) { + srv := newTestAnnounceContainerSpaceServer() + c := newTestContainerClient(t, srv) + + type customStatusTestcase struct { + msg string + detail *protostatus.Status_Detail + assert func(testing.TB, error) + } + for _, tc := range []struct { + code uint32 + err error + constErr error + custom []customStatusTestcase + }{ + // TODO: use const codes after transition to current module's proto lib + {code: 1024, err: new(apistatus.ServerInternal), constErr: apistatus.ErrServerInternal, custom: []customStatusTestcase{ + {msg: "some server failure", assert: func(t testing.TB, err error) { + var e *apistatus.ServerInternal + require.ErrorAs(t, err, &e) + require.Equal(t, "some server failure", e.Message()) + }}, + }}, + {code: 1025, err: new(apistatus.WrongMagicNumber), constErr: apistatus.ErrWrongMagicNumber, custom: []customStatusTestcase{ + {assert: func(t testing.TB, err error) { + var e *apistatus.WrongMagicNumber + require.ErrorAs(t, err, &e) + _, ok := e.CorrectMagic() + require.Zero(t, ok) + }}, + { + detail: &protostatus.Status_Detail{Id: 0, Value: []byte{140, 15, 162, 245, 219, 236, 37, 191}}, + assert: func(t testing.TB, err error) { + var e *apistatus.WrongMagicNumber + require.ErrorAs(t, err, &e) + magic, ok := e.CorrectMagic() + require.EqualValues(t, 1, ok) + require.EqualValues(t, uint64(10092464466800944575), magic) + }, + }, + { + detail: &protostatus.Status_Detail{Id: 0, Value: []byte{1, 2, 3}}, + assert: func(t testing.TB, err error) { + var e *apistatus.WrongMagicNumber + require.ErrorAs(t, err, &e) + _, ok := e.CorrectMagic() + require.EqualValues(t, -1, ok) + }, + }, + }}, + {code: 1026, err: new(apistatus.SignatureVerification), constErr: apistatus.ErrSignatureVerification, custom: []customStatusTestcase{ + {msg: "invalid request signature", assert: func(t testing.TB, err error) { + var e *apistatus.SignatureVerification + require.ErrorAs(t, err, &e) + require.Equal(t, "invalid request signature", e.Message()) + }}, + }}, + {code: 1027, err: new(apistatus.NodeUnderMaintenance), constErr: apistatus.ErrNodeUnderMaintenance, custom: []customStatusTestcase{ + {msg: "node is under maintenance", assert: func(t testing.TB, err error) { + var e *apistatus.NodeUnderMaintenance + require.ErrorAs(t, err, &e) + require.Equal(t, "node is under maintenance", e.Message()) + }}, + }}, + } { + st := &protostatus.Status{Code: tc.code} + srv.respondWithStatus(st) + + err := c.ContainerAnnounceUsedSpace(ctx, anyValidAnnouncements, anyValidOpts) + require.ErrorAs(t, err, &tc.err) + require.ErrorIs(t, err, tc.constErr) + + for _, tcCustom := range tc.custom { + st.Message = tcCustom.msg + if tcCustom.detail != nil { + st.Details = []*protostatus.Status_Detail{tcCustom.detail} + } + srv.respondWithStatus(st) + + err := c.ContainerAnnounceUsedSpace(ctx, anyValidAnnouncements, anyValidOpts) + require.ErrorAs(t, err, &tc.err) + tcCustom.assert(t, tc.err) + } + } + }) + }) + t.Run("sign request failure", func(t *testing.T) { + testSignRequestFailure(t, func(c *Client) error { + return c.ContainerAnnounceUsedSpace(ctx, anyValidAnnouncements, anyValidOpts) + }) + }) + t.Run("transport failure", func(t *testing.T) { + testTransportFailure(t, newTestAnnounceContainerSpaceServer, newTestContainerClient, func(c *Client) error { + return c.ContainerAnnounceUsedSpace(ctx, anyValidAnnouncements, anyValidOpts) + }) + }) + t.Run("response message decoding failure", 
func(t *testing.T) { + testUnaryRPCResponseTypeMismatch(t, "container.ContainerService", "AnnounceUsedSpace", func(c *Client) error { + return c.ContainerAnnounceUsedSpace(ctx, anyValidAnnouncements, anyValidOpts) + }) + }) + t.Run("invalid response verification header", func(t *testing.T) { + testInvalidResponseSignatures(t, newTestAnnounceContainerSpaceServer, newTestContainerClient, func(c *Client) error { + return c.ContainerAnnounceUsedSpace(ctx, anyValidAnnouncements, anyValidOpts) + }) + }) + t.Run("response callback", func(t *testing.T) { + testResponseCallback(t, newTestAnnounceContainerSpaceServer, newDefaultContainerService, func(c *Client) error { + return c.ContainerAnnounceUsedSpace(ctx, anyValidAnnouncements, anyValidOpts) + }) + }) + t.Run("exec statistics", func(t *testing.T) { + testStatistic(t, newTestAnnounceContainerSpaceServer, newDefaultContainerService, stat.MethodContainerAnnounceUsedSpace, + nil, []testedClientOp{func(c *Client) error { + return c.ContainerAnnounceUsedSpace(ctx, nil, anyValidOpts) + }}, func(c *Client) error { + return c.ContainerAnnounceUsedSpace(ctx, anyValidAnnouncements, anyValidOpts) + }, + ) + }) }
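Note on the WrongMagicNumber cases repeated in the status tables above: the 8-byte detail {140, 15, 162, 245, 219, 236, 37, 191} matches a big-endian encoding of the magic value the tests expect back from CorrectMagic(). A minimal, standalone sketch of that decoding (illustrative only, not part of this change; the main wrapper is added here purely for demonstration):

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	func main() {
		// Same detail bytes as in the WrongMagicNumber test cases above.
		detail := []byte{140, 15, 162, 245, 219, 236, 37, 191}
		// Prints 10092464466800944575, the value the tests assert via CorrectMagic().
		fmt.Println(binary.BigEndian.Uint64(detail))
	}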