diff --git a/accounting/decimal.go b/accounting/decimal.go index 9cea6618f..615fd5799 100644 --- a/accounting/decimal.go +++ b/accounting/decimal.go @@ -1,93 +1,107 @@ package accounting import ( - "github.com/nspcc-dev/neofs-api-go/v2/accounting" + "fmt" + + "github.com/nspcc-dev/neofs-sdk-go/api/accounting" + "google.golang.org/protobuf/proto" ) // Decimal represents decimal number for accounting operations. // -// Decimal is mutually compatible with github.com/nspcc-dev/neofs-api-go/v2/accounting.Decimal -// message. See ReadFromV2 / WriteToV2 methods. +// Decimal is mutually compatible with [accounting.Decimal] message. See +// [Decimal.ReadFromV2] / [Decimal.WriteToV2] methods. // // Instances can be created using built-in var declaration. -// -// Note that direct typecast is not safe and may result in loss of compatibility: -// -// _ = Decimal(accounting.Decimal{}) // not recommended -type Decimal accounting.Decimal +type Decimal struct { + val int64 + prec uint32 +} -// ReadFromV2 reads Decimal from the accounting.Decimal message. Checks if the -// message conforms to NeoFS API V2 protocol. +// ReadFromV2 reads Decimal from the [accounting.Decimal] message. Returns an +// error if the message is malformed according to the NeoFS API V2 protocol. The +// message must not be nil. // -// See also WriteToV2. -func (d *Decimal) ReadFromV2(m accounting.Decimal) error { - *d = Decimal(m) +// ReadFromV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. +// +// See also [Decimal.WriteToV2]. +func (d *Decimal) ReadFromV2(m *accounting.Decimal) error { + d.val = m.Value + d.prec = m.Precision return nil } -// WriteToV2 writes Decimal to the accounting.Decimal message. -// The message must not be nil. +// WriteToV2 writes Decimal to the [accounting.Decimal] message of the NeoFS API +// protocol. 
+// +// WriteToV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. // -// See also ReadFromV2. +// See also [Decimal.ReadFromV2]. func (d Decimal) WriteToV2(m *accounting.Decimal) { - *m = (accounting.Decimal)(d) + m.Value = d.val + m.Precision = d.prec } // Value returns value of the decimal number. // // Zero Decimal has zero value. // -// See also SetValue. +// See also [Decimal.SetValue]. func (d Decimal) Value() int64 { - return (*accounting.Decimal)(&d).GetValue() + return d.val } // SetValue sets value of the decimal number. // -// See also Value. +// See also [Decimal.Value]. func (d *Decimal) SetValue(v int64) { - (*accounting.Decimal)(d).SetValue(v) + d.val = v } // Precision returns precision of the decimal number. // // Zero Decimal has zero precision. // -// See also SetPrecision. +// See also [Decimal.SetPrecision]. func (d Decimal) Precision() uint32 { - return (*accounting.Decimal)(&d).GetPrecision() + return d.prec } // SetPrecision sets precision of the decimal number. // -// See also Precision. +// See also [Decimal.Precision]. func (d *Decimal) SetPrecision(p uint32) { - (*accounting.Decimal)(d).SetPrecision(p) + d.prec = p } +// TODO: why needed? if so, can be non-deterministic? + // Marshal encodes Decimal into a binary format of the NeoFS API protocol // (Protocol Buffers with direct field order). // -// See also Unmarshal. +// See also [Decimal.Unmarshal]. func (d Decimal) Marshal() []byte { var m accounting.Decimal d.WriteToV2(&m) - return m.StableMarshal(nil) + b := make([]byte, m.MarshaledSize()) + m.MarshalStable(b) + return b } // Unmarshal decodes NeoFS API protocol binary format into the Decimal // (Protocol Buffers with direct field order). Returns an error describing // a format violation. // -// See also Marshal. +// See also [Decimal.Marshal]. 
func (d *Decimal) Unmarshal(data []byte) error { var m accounting.Decimal - - err := m.Unmarshal(data) + err := proto.Unmarshal(data, &m) if err != nil { - return err + return fmt.Errorf("decode protobuf: %w", err) } - return d.ReadFromV2(m) + return d.ReadFromV2(&m) } diff --git a/accounting/decimal_test.go b/accounting/decimal_test.go index 18392c521..e7af0af0f 100644 --- a/accounting/decimal_test.go +++ b/accounting/decimal_test.go @@ -3,54 +3,74 @@ package accounting_test import ( "testing" - v2accounting "github.com/nspcc-dev/neofs-api-go/v2/accounting" "github.com/nspcc-dev/neofs-sdk-go/accounting" - accountingtest "github.com/nspcc-dev/neofs-sdk-go/accounting/test" + apiaccounting "github.com/nspcc-dev/neofs-sdk-go/api/accounting" "github.com/stretchr/testify/require" ) -func TestDecimalData(t *testing.T) { - const v, p = 4, 2 +func TestDecimal_Unmarshal(t *testing.T) { + t.Run("invalid binary", func(t *testing.T) { + var d accounting.Decimal + msg := []byte("definitely_not_protobuf") + err := d.Unmarshal(msg) + require.ErrorContains(t, err, "decode protobuf") + }) +} +func testDecimalField[Type uint32 | int64](t *testing.T, get func(accounting.Decimal) Type, set func(*accounting.Decimal, Type), + getAPI func(info *apiaccounting.Decimal) Type) { var d accounting.Decimal - require.Zero(t, d.Value()) - require.Zero(t, d.Precision()) + require.Zero(t, get(d)) - d.SetValue(v) - d.SetPrecision(p) + const val = 13 + set(&d, val) + require.EqualValues(t, val, get(d)) - require.EqualValues(t, v, d.Value()) - require.EqualValues(t, p, d.Precision()) -} + const valOther = 42 + set(&d, valOther) + require.EqualValues(t, valOther, get(d)) -func TestDecimalMessageV2(t *testing.T) { - var ( - d accounting.Decimal - m v2accounting.Decimal - ) + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst accounting.Decimal - m.SetValue(7) - m.SetPrecision(8) + set(&dst, val) - require.NoError(t, d.ReadFromV2(m)) + require.NoError(t, 
dst.Unmarshal(src.Marshal())) + require.Zero(t, get(dst)) - require.EqualValues(t, m.GetValue(), d.Value()) - require.EqualValues(t, m.GetPrecision(), d.Precision()) + set(&src, val) - var m2 v2accounting.Decimal + require.NoError(t, dst.Unmarshal(src.Marshal())) + require.EqualValues(t, val, get(dst)) + }) + t.Run("api", func(t *testing.T) { + var src, dst accounting.Decimal + var msg apiaccounting.Decimal - d.WriteToV2(&m2) + set(&dst, val) - require.EqualValues(t, d.Value(), m2.GetValue()) - require.EqualValues(t, d.Precision(), m2.GetPrecision()) -} + src.WriteToV2(&msg) + require.Zero(t, getAPI(&msg)) + require.NoError(t, dst.ReadFromV2(&msg)) + require.Zero(t, get(dst)) -func TestDecimal_Marshal(t *testing.T) { - d := accountingtest.Decimal() + set(&src, val) - var d2 accounting.Decimal - require.NoError(t, d2.Unmarshal(d.Marshal())) + src.WriteToV2(&msg) + require.EqualValues(t, val, getAPI(&msg)) + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + require.EqualValues(t, val, get(dst)) + }) + }) +} + +func TestDecimal_SetValue(t *testing.T) { + testDecimalField(t, accounting.Decimal.Value, (*accounting.Decimal).SetValue, (*apiaccounting.Decimal).GetValue) +} - require.Equal(t, d, d2) +func TestDecimal_SetPrecision(t *testing.T) { + testDecimalField(t, accounting.Decimal.Precision, (*accounting.Decimal).SetPrecision, (*apiaccounting.Decimal).GetPrecision) } diff --git a/accounting/example_test.go b/accounting/example_test.go index ed33b09cd..a875a6dc4 100644 --- a/accounting/example_test.go +++ b/accounting/example_test.go @@ -1,8 +1,8 @@ package accounting_test import ( - apiGoAccounting "github.com/nspcc-dev/neofs-api-go/v2/accounting" "github.com/nspcc-dev/neofs-sdk-go/accounting" + apiaccounting "github.com/nspcc-dev/neofs-sdk-go/api/accounting" ) func Example() { @@ -16,11 +16,10 @@ func Example() { // On the client side. 
- // import apiGoAccounting "github.com/nspcc-dev/neofs-api-go/v2/accounting" - var msg apiGoAccounting.Decimal + var msg apiaccounting.Decimal dec.WriteToV2(&msg) // *send message* // On the server side. - _ = dec.ReadFromV2(msg) + _ = dec.ReadFromV2(&msg) } diff --git a/accounting/test/decimal_test.go b/accounting/test/decimal_test.go new file mode 100644 index 000000000..f6a7f7afe --- /dev/null +++ b/accounting/test/decimal_test.go @@ -0,0 +1,25 @@ +package accountingtest_test + +import ( + "testing" + + "github.com/nspcc-dev/neofs-sdk-go/accounting" + accountingtest "github.com/nspcc-dev/neofs-sdk-go/accounting/test" + apiaccounting "github.com/nspcc-dev/neofs-sdk-go/api/accounting" + "github.com/stretchr/testify/require" +) + +func TestDecimal(t *testing.T) { + d := accountingtest.Decimal() + require.NotEqual(t, d, accountingtest.Decimal()) + + var d2 accounting.Decimal + require.NoError(t, d2.Unmarshal(d.Marshal())) + require.Equal(t, d, d2) + + var m apiaccounting.Decimal + d.WriteToV2(&m) + var d3 accounting.Decimal + require.NoError(t, d3.ReadFromV2(&m)) + require.Equal(t, d, d3) +} diff --git a/api/accounting/encoding.go b/api/accounting/encoding.go new file mode 100644 index 000000000..328cd7e31 --- /dev/null +++ b/api/accounting/encoding.go @@ -0,0 +1,65 @@ +package accounting + +import ( + "github.com/nspcc-dev/neofs-sdk-go/internal/proto" +) + +const ( + _ = iota + fieldDecimalValue + fieldDecimalPrecision +) + +func (x *Decimal) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeVarint(fieldDecimalValue, x.Value) + + proto.SizeVarint(fieldDecimalPrecision, x.Precision) + } + return sz +} + +func (x *Decimal) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalVarint(b, fieldDecimalValue, x.Value) + proto.MarshalVarint(b[off:], fieldDecimalPrecision, x.Precision) + } +} + +const ( + _ = iota + fieldBalanceReqOwner +) + +func (x *BalanceRequest_Body) MarshaledSize() int { + var sz int + if x != nil { + sz = 
proto.SizeNested(fieldBalanceReqOwner, x.OwnerId) + } + return sz +} + +func (x *BalanceRequest_Body) MarshalStable(b []byte) { + if x != nil { + proto.MarshalNested(b, fieldBalanceReqOwner, x.OwnerId) + } +} + +const ( + _ = iota + fieldBalanceRespBalance +) + +func (x *BalanceResponse_Body) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldBalanceRespBalance, x.Balance) + } + return sz +} + +func (x *BalanceResponse_Body) MarshalStable(b []byte) { + if x != nil { + proto.MarshalNested(b, fieldBalanceRespBalance, x.Balance) + } +} diff --git a/api/accounting/encoding_test.go b/api/accounting/encoding_test.go new file mode 100644 index 000000000..a6fdeb817 --- /dev/null +++ b/api/accounting/encoding_test.go @@ -0,0 +1,49 @@ +package accounting_test + +import ( + "testing" + + "github.com/nspcc-dev/neofs-sdk-go/api/accounting" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" +) + +func TestBalanceRequest_Body(t *testing.T) { + v := &accounting.BalanceRequest_Body{ + OwnerId: &refs.OwnerID{ + Value: []byte("any_owner"), + }, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res accounting.BalanceRequest_Body + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.OwnerId, res.OwnerId) + // TODO: test field order. Pretty challenging in general, but can be simplified + // for NeoFS specifics (forbid group types, maps, etc.). 
+} + +func TestBalanceResponse_Body(t *testing.T) { + v := &accounting.BalanceResponse_Body{ + Balance: &accounting.Decimal{ + Value: 12, + Precision: 34, + }, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res accounting.BalanceResponse_Body + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.Balance, res.Balance) +} diff --git a/api/accounting/service.pb.go b/api/accounting/service.pb.go new file mode 100644 index 000000000..b37016ced --- /dev/null +++ b/api/accounting/service.pb.go @@ -0,0 +1,451 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.32.0 +// protoc v4.25.1 +// source: accounting/grpc/service.proto + +package accounting + +import ( + reflect "reflect" + sync "sync" + + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/session" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// BalanceRequest message +type BalanceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of the balance request message. + Body *BalanceRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries request meta information. Header data is used only to regulate + // message transport and does not affect request execution. + MetaHeader *session.RequestMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries request verification information. 
This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. + VerifyHeader *session.RequestVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *BalanceRequest) Reset() { + *x = BalanceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_accounting_grpc_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BalanceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BalanceRequest) ProtoMessage() {} + +func (x *BalanceRequest) ProtoReflect() protoreflect.Message { + mi := &file_accounting_grpc_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BalanceRequest.ProtoReflect.Descriptor instead. +func (*BalanceRequest) Descriptor() ([]byte, []int) { + return file_accounting_grpc_service_proto_rawDescGZIP(), []int{0} +} + +func (x *BalanceRequest) GetBody() *BalanceRequest_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *BalanceRequest) GetMetaHeader() *session.RequestMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *BalanceRequest) GetVerifyHeader() *session.RequestVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// BalanceResponse message +type BalanceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of the balance response message. + Body *BalanceResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries response meta information. Header data is used only to regulate + // message transport and does not affect request execution. 
+ MetaHeader *session.ResponseMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries response verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. + VerifyHeader *session.ResponseVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *BalanceResponse) Reset() { + *x = BalanceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_accounting_grpc_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BalanceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BalanceResponse) ProtoMessage() {} + +func (x *BalanceResponse) ProtoReflect() protoreflect.Message { + mi := &file_accounting_grpc_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BalanceResponse.ProtoReflect.Descriptor instead. +func (*BalanceResponse) Descriptor() ([]byte, []int) { + return file_accounting_grpc_service_proto_rawDescGZIP(), []int{1} +} + +func (x *BalanceResponse) GetBody() *BalanceResponse_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *BalanceResponse) GetMetaHeader() *session.ResponseMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *BalanceResponse) GetVerifyHeader() *session.ResponseVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// To indicate the account for which the balance is requested, its identifier +// is used. It can be any existing account in NeoFS sidechain `Balance` smart +// contract. 
If omitted, client implementation MUST set it to the request's +// signer `OwnerID`. +type BalanceRequest_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Valid user identifier in `OwnerID` format for which the balance is + // requested. Required field. + OwnerId *refs.OwnerID `protobuf:"bytes,1,opt,name=owner_id,json=ownerId,proto3" json:"owner_id,omitempty"` +} + +func (x *BalanceRequest_Body) Reset() { + *x = BalanceRequest_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_accounting_grpc_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BalanceRequest_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BalanceRequest_Body) ProtoMessage() {} + +func (x *BalanceRequest_Body) ProtoReflect() protoreflect.Message { + mi := &file_accounting_grpc_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BalanceRequest_Body.ProtoReflect.Descriptor instead. +func (*BalanceRequest_Body) Descriptor() ([]byte, []int) { + return file_accounting_grpc_service_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *BalanceRequest_Body) GetOwnerId() *refs.OwnerID { + if x != nil { + return x.OwnerId + } + return nil +} + +// The amount of funds in GAS token for the `OwnerID`'s account requested. +// Balance is given in the `Decimal` format to avoid precision issues with rounding. +type BalanceResponse_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Amount of funds in GAS token for the requested account. 
+ Balance *Decimal `protobuf:"bytes,1,opt,name=balance,proto3" json:"balance,omitempty"` +} + +func (x *BalanceResponse_Body) Reset() { + *x = BalanceResponse_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_accounting_grpc_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BalanceResponse_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BalanceResponse_Body) ProtoMessage() {} + +func (x *BalanceResponse_Body) ProtoReflect() protoreflect.Message { + mi := &file_accounting_grpc_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BalanceResponse_Body.ProtoReflect.Descriptor instead. +func (*BalanceResponse_Body) Descriptor() ([]byte, []int) { + return file_accounting_grpc_service_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *BalanceResponse_Body) GetBalance() *Decimal { + if x != nil { + return x.Balance + } + return nil +} + +var File_accounting_grpc_service_proto protoreflect.FileDescriptor + +var file_accounting_grpc_service_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x69, 0x6e, 0x67, 0x2f, 0x67, 0x72, 0x70, + 0x63, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x14, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x69, 0x6e, 0x67, 0x1a, 0x1b, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x69, 0x6e, + 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x15, 0x72, 0x65, 0x66, 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x73, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x2f, 0x67, 0x72, 
0x70, 0x63, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xa5, 0x02, 0x0a, 0x0e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x42, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, + 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x65, 0x6f, + 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x51, 0x0a, 0x0d, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, + 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, + 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, + 0x3a, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x32, 0x0a, 0x08, 0x6f, 0x77, 0x6e, 0x65, 0x72, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, + 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, + 0x49, 0x44, 0x52, 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x22, 0xae, 0x02, 0x0a, 0x0f, + 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 
0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x3e, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, + 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, + 0x46, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x6d, 0x65, 0x74, + 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x52, 0x0a, 0x0d, 0x76, 0x65, 0x72, 0x69, 0x66, + 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, + 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, + 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x3f, 0x0a, 0x04, 0x42, + 0x6f, 0x64, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x44, 0x65, 0x63, 0x69, + 0x6d, 0x61, 0x6c, 0x52, 0x07, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x32, 0x6b, 0x0a, 0x11, + 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x12, 0x56, 0x0a, 0x07, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x24, 0x2e, 0x6e, + 0x65, 
0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x69, 0x6e, 0x67, 0x2e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x61, + 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x62, 0x5a, 0x3f, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6e, 0x73, 0x70, 0x63, 0x63, 0x2d, 0x64, 0x65, + 0x76, 0x2f, 0x6e, 0x65, 0x6f, 0x66, 0x73, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x67, 0x6f, 0x2f, 0x76, + 0x32, 0x2f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x69, 0x6e, 0x67, 0x2f, 0x67, 0x72, 0x70, + 0x63, 0x3b, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x69, 0x6e, 0x67, 0xaa, 0x02, 0x1e, 0x4e, + 0x65, 0x6f, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x41, + 0x50, 0x49, 0x2e, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x69, 0x6e, 0x67, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_accounting_grpc_service_proto_rawDescOnce sync.Once + file_accounting_grpc_service_proto_rawDescData = file_accounting_grpc_service_proto_rawDesc +) + +func file_accounting_grpc_service_proto_rawDescGZIP() []byte { + file_accounting_grpc_service_proto_rawDescOnce.Do(func() { + file_accounting_grpc_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_accounting_grpc_service_proto_rawDescData) + }) + return file_accounting_grpc_service_proto_rawDescData +} + +var file_accounting_grpc_service_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_accounting_grpc_service_proto_goTypes = []interface{}{ + (*BalanceRequest)(nil), // 0: neo.fs.v2.accounting.BalanceRequest + (*BalanceResponse)(nil), // 1: neo.fs.v2.accounting.BalanceResponse + (*BalanceRequest_Body)(nil), // 2: neo.fs.v2.accounting.BalanceRequest.Body + (*BalanceResponse_Body)(nil), // 3: 
neo.fs.v2.accounting.BalanceResponse.Body + (*session.RequestMetaHeader)(nil), // 4: neo.fs.v2.session.RequestMetaHeader + (*session.RequestVerificationHeader)(nil), // 5: neo.fs.v2.session.RequestVerificationHeader + (*session.ResponseMetaHeader)(nil), // 6: neo.fs.v2.session.ResponseMetaHeader + (*session.ResponseVerificationHeader)(nil), // 7: neo.fs.v2.session.ResponseVerificationHeader + (*refs.OwnerID)(nil), // 8: neo.fs.v2.refs.OwnerID + (*Decimal)(nil), // 9: neo.fs.v2.accounting.Decimal +} +var file_accounting_grpc_service_proto_depIdxs = []int32{ + 2, // 0: neo.fs.v2.accounting.BalanceRequest.body:type_name -> neo.fs.v2.accounting.BalanceRequest.Body + 4, // 1: neo.fs.v2.accounting.BalanceRequest.meta_header:type_name -> neo.fs.v2.session.RequestMetaHeader + 5, // 2: neo.fs.v2.accounting.BalanceRequest.verify_header:type_name -> neo.fs.v2.session.RequestVerificationHeader + 3, // 3: neo.fs.v2.accounting.BalanceResponse.body:type_name -> neo.fs.v2.accounting.BalanceResponse.Body + 6, // 4: neo.fs.v2.accounting.BalanceResponse.meta_header:type_name -> neo.fs.v2.session.ResponseMetaHeader + 7, // 5: neo.fs.v2.accounting.BalanceResponse.verify_header:type_name -> neo.fs.v2.session.ResponseVerificationHeader + 8, // 6: neo.fs.v2.accounting.BalanceRequest.Body.owner_id:type_name -> neo.fs.v2.refs.OwnerID + 9, // 7: neo.fs.v2.accounting.BalanceResponse.Body.balance:type_name -> neo.fs.v2.accounting.Decimal + 0, // 8: neo.fs.v2.accounting.AccountingService.Balance:input_type -> neo.fs.v2.accounting.BalanceRequest + 1, // 9: neo.fs.v2.accounting.AccountingService.Balance:output_type -> neo.fs.v2.accounting.BalanceResponse + 9, // [9:10] is the sub-list for method output_type + 8, // [8:9] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name +} + +func init() { file_accounting_grpc_service_proto_init() } +func 
file_accounting_grpc_service_proto_init() { + if File_accounting_grpc_service_proto != nil { + return + } + file_accounting_grpc_types_proto_init() + if !protoimpl.UnsafeEnabled { + file_accounting_grpc_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BalanceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_accounting_grpc_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BalanceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_accounting_grpc_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BalanceRequest_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_accounting_grpc_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BalanceResponse_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_accounting_grpc_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_accounting_grpc_service_proto_goTypes, + DependencyIndexes: file_accounting_grpc_service_proto_depIdxs, + MessageInfos: file_accounting_grpc_service_proto_msgTypes, + }.Build() + File_accounting_grpc_service_proto = out.File + file_accounting_grpc_service_proto_rawDesc = nil + file_accounting_grpc_service_proto_goTypes = nil + file_accounting_grpc_service_proto_depIdxs = nil +} diff --git a/api/accounting/service_grpc.pb.go 
b/api/accounting/service_grpc.pb.go new file mode 100644 index 000000000..27f407f7c --- /dev/null +++ b/api/accounting/service_grpc.pb.go @@ -0,0 +1,119 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.25.1 +// source: accounting/grpc/service.proto + +package accounting + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + AccountingService_Balance_FullMethodName = "/neo.fs.v2.accounting.AccountingService/Balance" +) + +// AccountingServiceClient is the client API for AccountingService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type AccountingServiceClient interface { + // Returns the amount of funds in GAS token for the requested NeoFS account. + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): + // balance has been successfully read; + // - Common failures (SECTION_FAILURE_COMMON). + Balance(ctx context.Context, in *BalanceRequest, opts ...grpc.CallOption) (*BalanceResponse, error) +} + +type accountingServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewAccountingServiceClient(cc grpc.ClientConnInterface) AccountingServiceClient { + return &accountingServiceClient{cc} +} + +func (c *accountingServiceClient) Balance(ctx context.Context, in *BalanceRequest, opts ...grpc.CallOption) (*BalanceResponse, error) { + out := new(BalanceResponse) + err := c.cc.Invoke(ctx, AccountingService_Balance_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// AccountingServiceServer is the server API for AccountingService service. +// All implementations should embed UnimplementedAccountingServiceServer +// for forward compatibility +type AccountingServiceServer interface { + // Returns the amount of funds in GAS token for the requested NeoFS account. + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): + // balance has been successfully read; + // - Common failures (SECTION_FAILURE_COMMON). + Balance(context.Context, *BalanceRequest) (*BalanceResponse, error) +} + +// UnimplementedAccountingServiceServer should be embedded to have forward compatible implementations. +type UnimplementedAccountingServiceServer struct { +} + +func (UnimplementedAccountingServiceServer) Balance(context.Context, *BalanceRequest) (*BalanceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Balance not implemented") +} + +// UnsafeAccountingServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to AccountingServiceServer will +// result in compilation errors. 
+type UnsafeAccountingServiceServer interface { + mustEmbedUnimplementedAccountingServiceServer() +} + +func RegisterAccountingServiceServer(s grpc.ServiceRegistrar, srv AccountingServiceServer) { + s.RegisterService(&AccountingService_ServiceDesc, srv) +} + +func _AccountingService_Balance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BalanceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AccountingServiceServer).Balance(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: AccountingService_Balance_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AccountingServiceServer).Balance(ctx, req.(*BalanceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// AccountingService_ServiceDesc is the grpc.ServiceDesc for AccountingService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var AccountingService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "neo.fs.v2.accounting.AccountingService", + HandlerType: (*AccountingServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Balance", + Handler: _AccountingService_Balance_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "accounting/grpc/service.proto", +} diff --git a/api/accounting/types.pb.go b/api/accounting/types.pb.go new file mode 100644 index 000000000..be4f26704 --- /dev/null +++ b/api/accounting/types.pb.go @@ -0,0 +1,168 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.32.0 +// protoc v4.25.1 +// source: accounting/grpc/types.proto + +package accounting + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Standard floating point data type can't be used in NeoFS due to inexactness +// of the result when doing lots of small number operations. To solve the lost +// precision issue, special `Decimal` format is used for monetary computations. +// +// Please see [The General Decimal Arithmetic +// Specification](http://speleotrove.com/decimal/) for detailed problem +// description. +type Decimal struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Number in the smallest Token fractions. + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + // Precision value indicating how many smallest fractions can be in one + // integer. 
+ Precision uint32 `protobuf:"varint,2,opt,name=precision,proto3" json:"precision,omitempty"` +} + +func (x *Decimal) Reset() { + *x = Decimal{} + if protoimpl.UnsafeEnabled { + mi := &file_accounting_grpc_types_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Decimal) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Decimal) ProtoMessage() {} + +func (x *Decimal) ProtoReflect() protoreflect.Message { + mi := &file_accounting_grpc_types_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Decimal.ProtoReflect.Descriptor instead. +func (*Decimal) Descriptor() ([]byte, []int) { + return file_accounting_grpc_types_proto_rawDescGZIP(), []int{0} +} + +func (x *Decimal) GetValue() int64 { + if x != nil { + return x.Value + } + return 0 +} + +func (x *Decimal) GetPrecision() uint32 { + if x != nil { + return x.Precision + } + return 0 +} + +var File_accounting_grpc_types_proto protoreflect.FileDescriptor + +var file_accounting_grpc_types_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x69, 0x6e, 0x67, 0x2f, 0x67, 0x72, 0x70, + 0x63, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x6e, + 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x69, 0x6e, 0x67, 0x22, 0x3d, 0x0a, 0x07, 0x44, 0x65, 0x63, 0x69, 0x6d, 0x61, 0x6c, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x63, 0x69, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x70, 0x72, 0x65, 0x63, 0x69, 0x73, 0x69, + 0x6f, 0x6e, 0x42, 0x62, 0x5a, 0x3f, 0x67, 0x69, 0x74, 
0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x6e, 0x73, 0x70, 0x63, 0x63, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x6e, 0x65, 0x6f, 0x66, 0x73, + 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x63, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x69, 0x6e, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x3b, 0x61, 0x63, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x69, 0x6e, 0x67, 0xaa, 0x02, 0x1e, 0x4e, 0x65, 0x6f, 0x2e, 0x46, 0x69, 0x6c, 0x65, + 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x41, 0x50, 0x49, 0x2e, 0x41, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x69, 0x6e, 0x67, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_accounting_grpc_types_proto_rawDescOnce sync.Once + file_accounting_grpc_types_proto_rawDescData = file_accounting_grpc_types_proto_rawDesc +) + +func file_accounting_grpc_types_proto_rawDescGZIP() []byte { + file_accounting_grpc_types_proto_rawDescOnce.Do(func() { + file_accounting_grpc_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_accounting_grpc_types_proto_rawDescData) + }) + return file_accounting_grpc_types_proto_rawDescData +} + +var file_accounting_grpc_types_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_accounting_grpc_types_proto_goTypes = []interface{}{ + (*Decimal)(nil), // 0: neo.fs.v2.accounting.Decimal +} +var file_accounting_grpc_types_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_accounting_grpc_types_proto_init() } +func file_accounting_grpc_types_proto_init() { + if File_accounting_grpc_types_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_accounting_grpc_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Decimal); i { + case 0: + return &v.state + case 1: + return 
&v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_accounting_grpc_types_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_accounting_grpc_types_proto_goTypes, + DependencyIndexes: file_accounting_grpc_types_proto_depIdxs, + MessageInfos: file_accounting_grpc_types_proto_msgTypes, + }.Build() + File_accounting_grpc_types_proto = out.File + file_accounting_grpc_types_proto_rawDesc = nil + file_accounting_grpc_types_proto_goTypes = nil + file_accounting_grpc_types_proto_depIdxs = nil +} diff --git a/api/acl/encoding.go b/api/acl/encoding.go new file mode 100644 index 000000000..cfda41547 --- /dev/null +++ b/api/acl/encoding.go @@ -0,0 +1,195 @@ +package acl + +import ( + "github.com/nspcc-dev/neofs-sdk-go/internal/proto" +) + +const ( + _ = iota + fieldEACLVersion + fieldEACLContainer + fieldEACLRecords +) + +func (x *EACLTable) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldEACLVersion, x.Version) + + proto.SizeNested(fieldEACLContainer, x.ContainerId) + for i := range x.Records { + sz += proto.SizeNested(fieldEACLRecords, x.Records[i]) + } + } + return sz +} + +func (x *EACLTable) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalNested(b, fieldEACLVersion, x.Version) + off += proto.MarshalNested(b[off:], fieldEACLContainer, x.ContainerId) + for i := range x.Records { + off += proto.MarshalNested(b[off:], fieldEACLRecords, x.Records[i]) + } + } +} + +const ( + _ = iota + fieldEACLOp + fieldEACLAction + fieldEACLFilters + fieldEACLTargets +) + +func (x *EACLRecord) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeVarint(fieldEACLOp, int32(x.Operation)) + + proto.SizeVarint(fieldEACLAction, int32(x.Action)) + for i := range x.Filters { + sz += 
proto.SizeNested(fieldEACLFilters, x.Filters[i]) + } + for i := range x.Targets { + sz += proto.SizeNested(fieldEACLTargets, x.Targets[i]) + } + } + return sz +} + +func (x *EACLRecord) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalVarint(b, fieldEACLOp, int32(x.Operation)) + off += proto.MarshalVarint(b[off:], fieldEACLAction, int32(x.Action)) + for i := range x.Filters { + off += proto.MarshalNested(b[off:], fieldEACLFilters, x.Filters[i]) + } + for i := range x.Targets { + off += proto.MarshalNested(b[off:], fieldEACLTargets, x.Targets[i]) + } + } +} + +const ( + _ = iota + fieldEACLHeader + fieldEACLMatcher + fieldEACLKey + fieldEACLValue +) + +func (x *EACLRecord_Filter) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeVarint(fieldEACLHeader, int32(x.HeaderType)) + + proto.SizeVarint(fieldEACLMatcher, int32(x.MatchType)) + + proto.SizeBytes(fieldEACLKey, x.Key) + + proto.SizeBytes(fieldEACLValue, x.Value) + } + return sz +} + +func (x *EACLRecord_Filter) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalVarint(b, fieldEACLHeader, int32(x.HeaderType)) + off += proto.MarshalVarint(b[off:], fieldEACLMatcher, int32(x.MatchType)) + off += proto.MarshalBytes(b[off:], fieldEACLKey, x.Key) + proto.MarshalBytes(b[off:], fieldEACLValue, x.Value) + } +} + +const ( + _ = iota + fieldEACLRole + fieldEACLTargetKeys +) + +func (x *EACLRecord_Target) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeVarint(fieldEACLRole, int32(x.Role)) + + proto.SizeRepeatedBytes(fieldEACLTargetKeys, x.Keys) + } + return sz +} + +func (x *EACLRecord_Target) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalVarint(b, fieldEACLRole, int32(x.Role)) + proto.MarshalRepeatedBytes(b[off:], fieldEACLTargetKeys, x.Keys) + } +} + +const ( + _ = iota + fieldBearerExp + fieldBearerNbf + fieldBearerIat +) + +func (x *BearerToken_Body_TokenLifetime) MarshaledSize() int { + var sz int + if x != nil { + sz = 
proto.SizeVarint(fieldBearerExp, x.Exp) + + proto.SizeVarint(fieldBearerNbf, x.Nbf) + + proto.SizeVarint(fieldBearerIat, x.Iat) + } + return sz +} + +func (x *BearerToken_Body_TokenLifetime) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalVarint(b, fieldBearerExp, x.Exp) + off += proto.MarshalVarint(b[off:], fieldBearerNbf, x.Nbf) + proto.MarshalVarint(b[off:], fieldBearerIat, x.Iat) + } +} + +const ( + _ = iota + fieldBearerEACL + fieldBearerOwner + fieldBearerLifetime + fieldBearerIssuer +) + +func (x *BearerToken_Body) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldBearerEACL, x.EaclTable) + + proto.SizeNested(fieldBearerOwner, x.OwnerId) + + proto.SizeNested(fieldBearerLifetime, x.Lifetime) + + proto.SizeNested(fieldBearerIssuer, x.Issuer) + } + return sz +} + +func (x *BearerToken_Body) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalNested(b, fieldBearerEACL, x.EaclTable) + off += proto.MarshalNested(b[off:], fieldBearerOwner, x.OwnerId) + off += proto.MarshalNested(b[off:], fieldBearerLifetime, x.Lifetime) + proto.MarshalNested(b[off:], fieldBearerIssuer, x.Issuer) + } +} + +const ( + _ = iota + fieldBearerBody + fieldBearerSignature +) + +func (x *BearerToken) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldBearerBody, x.Body) + + proto.SizeNested(fieldBearerSignature, x.Signature) + } + return sz +} + +func (x *BearerToken) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalNested(b, fieldBearerBody, x.Body) + proto.MarshalNested(b[off:], fieldBearerSignature, x.Signature) + } +} diff --git a/api/acl/encoding_test.go b/api/acl/encoding_test.go new file mode 100644 index 000000000..f69992f83 --- /dev/null +++ b/api/acl/encoding_test.go @@ -0,0 +1,64 @@ +package acl_test + +import ( + "testing" + + "github.com/nspcc-dev/neofs-sdk-go/api/acl" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + "github.com/stretchr/testify/require" + 
"google.golang.org/protobuf/proto" +) + +func TestBearerToken(t *testing.T) { + v := &acl.BearerToken{ + Body: &acl.BearerToken_Body{ + EaclTable: &acl.EACLTable{ + Version: &refs.Version{Major: 123, Minor: 456}, + ContainerId: &refs.ContainerID{Value: []byte("any_container")}, + Records: []*acl.EACLRecord{ + { + Operation: 1, Action: 2, + Filters: []*acl.EACLRecord_Filter{ + {HeaderType: 3, MatchType: 4, Key: "key1", Value: "val1"}, + {HeaderType: 5, MatchType: 6, Key: "key2", Value: "val2"}, + }, + Targets: []*acl.EACLRecord_Target{ + {Role: 7, Keys: [][]byte{{0}, {1}}}, + {Role: 8, Keys: [][]byte{{2}, {3}}}, + }, + }, + { + Operation: 9, Action: 10, + Filters: []*acl.EACLRecord_Filter{ + {HeaderType: 11, MatchType: 12, Key: "key3", Value: "val3"}, + {HeaderType: 13, MatchType: 14, Key: "key4", Value: "val4"}, + }, + Targets: []*acl.EACLRecord_Target{ + {Role: 15, Keys: [][]byte{{4}, {5}}}, + {Role: 16, Keys: [][]byte{{6}, {7}}}, + }, + }, + }, + }, + OwnerId: &refs.OwnerID{Value: []byte("any_owner")}, + Lifetime: &acl.BearerToken_Body_TokenLifetime{Exp: 17, Nbf: 18, Iat: 19}, + Issuer: &refs.OwnerID{Value: []byte("any_issuer")}, + }, + Signature: &refs.Signature{ + Key: []byte("any_public_key"), + Sign: []byte("any_signature"), + Scheme: 100, + }, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res acl.BearerToken + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.Body, res.Body) + require.Equal(t, v.Signature, res.Signature) +} diff --git a/api/acl/types.pb.go b/api/acl/types.pb.go new file mode 100644 index 000000000..6459a9a9c --- /dev/null +++ b/api/acl/types.pb.go @@ -0,0 +1,1155 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.32.0 +// protoc v4.25.1 +// source: acl/grpc/types.proto + +package acl + +import ( + reflect "reflect" + sync "sync" + + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Target role of the access control rule in access control list. +type Role int32 + +const ( + // Unspecified role, default value + Role_ROLE_UNSPECIFIED Role = 0 + // User target rule is applied if sender is the owner of the container + Role_USER Role = 1 + // System target rule is applied if sender is a storage node within the + // container or an inner ring node + Role_SYSTEM Role = 2 + // Others target rule is applied if sender is neither a user nor a system target + Role_OTHERS Role = 3 +) + +// Enum value maps for Role. +var ( + Role_name = map[int32]string{ + 0: "ROLE_UNSPECIFIED", + 1: "USER", + 2: "SYSTEM", + 3: "OTHERS", + } + Role_value = map[string]int32{ + "ROLE_UNSPECIFIED": 0, + "USER": 1, + "SYSTEM": 2, + "OTHERS": 3, + } +) + +func (x Role) Enum() *Role { + p := new(Role) + *p = x + return p +} + +func (x Role) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Role) Descriptor() protoreflect.EnumDescriptor { + return file_acl_grpc_types_proto_enumTypes[0].Descriptor() +} + +func (Role) Type() protoreflect.EnumType { + return &file_acl_grpc_types_proto_enumTypes[0] +} + +func (x Role) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Role.Descriptor instead. 
+func (Role) EnumDescriptor() ([]byte, []int) { + return file_acl_grpc_types_proto_rawDescGZIP(), []int{0} +} + +// MatchType is an enumeration of match types. +type MatchType int32 + +const ( + // Unspecified match type, default value. + MatchType_MATCH_TYPE_UNSPECIFIED MatchType = 0 + // Return true if strings are equal + MatchType_STRING_EQUAL MatchType = 1 + // Return true if strings are different + MatchType_STRING_NOT_EQUAL MatchType = 2 + // Absence of attribute + MatchType_NOT_PRESENT MatchType = 3 + // Numeric 'greater than' + MatchType_NUM_GT MatchType = 4 + // Numeric 'greater or equal than' + MatchType_NUM_GE MatchType = 5 + // Numeric 'less than' + MatchType_NUM_LT MatchType = 6 + // Numeric 'less or equal than' + MatchType_NUM_LE MatchType = 7 +) + +// Enum value maps for MatchType. +var ( + MatchType_name = map[int32]string{ + 0: "MATCH_TYPE_UNSPECIFIED", + 1: "STRING_EQUAL", + 2: "STRING_NOT_EQUAL", + 3: "NOT_PRESENT", + 4: "NUM_GT", + 5: "NUM_GE", + 6: "NUM_LT", + 7: "NUM_LE", + } + MatchType_value = map[string]int32{ + "MATCH_TYPE_UNSPECIFIED": 0, + "STRING_EQUAL": 1, + "STRING_NOT_EQUAL": 2, + "NOT_PRESENT": 3, + "NUM_GT": 4, + "NUM_GE": 5, + "NUM_LT": 6, + "NUM_LE": 7, + } +) + +func (x MatchType) Enum() *MatchType { + p := new(MatchType) + *p = x + return p +} + +func (x MatchType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (MatchType) Descriptor() protoreflect.EnumDescriptor { + return file_acl_grpc_types_proto_enumTypes[1].Descriptor() +} + +func (MatchType) Type() protoreflect.EnumType { + return &file_acl_grpc_types_proto_enumTypes[1] +} + +func (x MatchType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use MatchType.Descriptor instead. 
+func (MatchType) EnumDescriptor() ([]byte, []int) { + return file_acl_grpc_types_proto_rawDescGZIP(), []int{1} +} + +// Request's operation type to match if the rule is applicable to a particular +// request. +type Operation int32 + +const ( + // Unspecified operation, default value + Operation_OPERATION_UNSPECIFIED Operation = 0 + // Get + Operation_GET Operation = 1 + // Head + Operation_HEAD Operation = 2 + // Put + Operation_PUT Operation = 3 + // Delete + Operation_DELETE Operation = 4 + // Search + Operation_SEARCH Operation = 5 + // GetRange + Operation_GETRANGE Operation = 6 + // GetRangeHash + Operation_GETRANGEHASH Operation = 7 +) + +// Enum value maps for Operation. +var ( + Operation_name = map[int32]string{ + 0: "OPERATION_UNSPECIFIED", + 1: "GET", + 2: "HEAD", + 3: "PUT", + 4: "DELETE", + 5: "SEARCH", + 6: "GETRANGE", + 7: "GETRANGEHASH", + } + Operation_value = map[string]int32{ + "OPERATION_UNSPECIFIED": 0, + "GET": 1, + "HEAD": 2, + "PUT": 3, + "DELETE": 4, + "SEARCH": 5, + "GETRANGE": 6, + "GETRANGEHASH": 7, + } +) + +func (x Operation) Enum() *Operation { + p := new(Operation) + *p = x + return p +} + +func (x Operation) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Operation) Descriptor() protoreflect.EnumDescriptor { + return file_acl_grpc_types_proto_enumTypes[2].Descriptor() +} + +func (Operation) Type() protoreflect.EnumType { + return &file_acl_grpc_types_proto_enumTypes[2] +} + +func (x Operation) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Operation.Descriptor instead. +func (Operation) EnumDescriptor() ([]byte, []int) { + return file_acl_grpc_types_proto_rawDescGZIP(), []int{2} +} + +// Rule execution result action. Either allows or denies access if the rule's +// filters match. 
+type Action int32 + +const ( + // Unspecified action, default value + Action_ACTION_UNSPECIFIED Action = 0 + // Allow action + Action_ALLOW Action = 1 + // Deny action + Action_DENY Action = 2 +) + +// Enum value maps for Action. +var ( + Action_name = map[int32]string{ + 0: "ACTION_UNSPECIFIED", + 1: "ALLOW", + 2: "DENY", + } + Action_value = map[string]int32{ + "ACTION_UNSPECIFIED": 0, + "ALLOW": 1, + "DENY": 2, + } +) + +func (x Action) Enum() *Action { + p := new(Action) + *p = x + return p +} + +func (x Action) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Action) Descriptor() protoreflect.EnumDescriptor { + return file_acl_grpc_types_proto_enumTypes[3].Descriptor() +} + +func (Action) Type() protoreflect.EnumType { + return &file_acl_grpc_types_proto_enumTypes[3] +} + +func (x Action) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Action.Descriptor instead. +func (Action) EnumDescriptor() ([]byte, []int) { + return file_acl_grpc_types_proto_rawDescGZIP(), []int{3} +} + +// Enumeration of possible sources of Headers to apply filters. +type HeaderType int32 + +const ( + // Unspecified header, default value. + HeaderType_HEADER_UNSPECIFIED HeaderType = 0 + // Filter request headers + HeaderType_REQUEST HeaderType = 1 + // Filter object headers + HeaderType_OBJECT HeaderType = 2 + // Filter service headers. These are not processed by NeoFS nodes and + // exist for service use only. + HeaderType_SERVICE HeaderType = 3 +) + +// Enum value maps for HeaderType. 
+var ( + HeaderType_name = map[int32]string{ + 0: "HEADER_UNSPECIFIED", + 1: "REQUEST", + 2: "OBJECT", + 3: "SERVICE", + } + HeaderType_value = map[string]int32{ + "HEADER_UNSPECIFIED": 0, + "REQUEST": 1, + "OBJECT": 2, + "SERVICE": 3, + } +) + +func (x HeaderType) Enum() *HeaderType { + p := new(HeaderType) + *p = x + return p +} + +func (x HeaderType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HeaderType) Descriptor() protoreflect.EnumDescriptor { + return file_acl_grpc_types_proto_enumTypes[4].Descriptor() +} + +func (HeaderType) Type() protoreflect.EnumType { + return &file_acl_grpc_types_proto_enumTypes[4] +} + +func (x HeaderType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HeaderType.Descriptor instead. +func (HeaderType) EnumDescriptor() ([]byte, []int) { + return file_acl_grpc_types_proto_rawDescGZIP(), []int{4} +} + +// Describes a single eACL rule. +type EACLRecord struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // NeoFS request Verb to match + Operation Operation `protobuf:"varint,1,opt,name=operation,proto3,enum=neo.fs.v2.acl.Operation" json:"operation,omitempty"` + // Rule execution result. Either allows or denies access if filters match. 
+ Action Action `protobuf:"varint,2,opt,name=action,proto3,enum=neo.fs.v2.acl.Action" json:"action,omitempty"` + // List of filters to match and see if rule is applicable + Filters []*EACLRecord_Filter `protobuf:"bytes,3,rep,name=filters,proto3" json:"filters,omitempty"` + // List of target subjects to apply ACL rule to + Targets []*EACLRecord_Target `protobuf:"bytes,4,rep,name=targets,proto3" json:"targets,omitempty"` +} + +func (x *EACLRecord) Reset() { + *x = EACLRecord{} + if protoimpl.UnsafeEnabled { + mi := &file_acl_grpc_types_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EACLRecord) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EACLRecord) ProtoMessage() {} + +func (x *EACLRecord) ProtoReflect() protoreflect.Message { + mi := &file_acl_grpc_types_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EACLRecord.ProtoReflect.Descriptor instead. +func (*EACLRecord) Descriptor() ([]byte, []int) { + return file_acl_grpc_types_proto_rawDescGZIP(), []int{0} +} + +func (x *EACLRecord) GetOperation() Operation { + if x != nil { + return x.Operation + } + return Operation_OPERATION_UNSPECIFIED +} + +func (x *EACLRecord) GetAction() Action { + if x != nil { + return x.Action + } + return Action_ACTION_UNSPECIFIED +} + +func (x *EACLRecord) GetFilters() []*EACLRecord_Filter { + if x != nil { + return x.Filters + } + return nil +} + +func (x *EACLRecord) GetTargets() []*EACLRecord_Target { + if x != nil { + return x.Targets + } + return nil +} + +// Extended ACL rules table. A list of ACL rules defined additionally to Basic +// ACL. Extended ACL rules can be attached to a container and can be updated +// or may be defined in `BearerToken` structure. 
Please see the corresponding +// NeoFS Technical Specification section for detailed description. +type EACLTable struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // eACL format version. Effectively, the version of API library used to create + // eACL Table. + Version *refs.Version `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Identifier of the container that should use given access control rules + ContainerId *refs.ContainerID `protobuf:"bytes,2,opt,name=container_id,json=containerID,proto3" json:"container_id,omitempty"` + // List of Extended ACL rules + Records []*EACLRecord `protobuf:"bytes,3,rep,name=records,proto3" json:"records,omitempty"` +} + +func (x *EACLTable) Reset() { + *x = EACLTable{} + if protoimpl.UnsafeEnabled { + mi := &file_acl_grpc_types_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EACLTable) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EACLTable) ProtoMessage() {} + +func (x *EACLTable) ProtoReflect() protoreflect.Message { + mi := &file_acl_grpc_types_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EACLTable.ProtoReflect.Descriptor instead. 
+func (*EACLTable) Descriptor() ([]byte, []int) { + return file_acl_grpc_types_proto_rawDescGZIP(), []int{1} +} + +func (x *EACLTable) GetVersion() *refs.Version { + if x != nil { + return x.Version + } + return nil +} + +func (x *EACLTable) GetContainerId() *refs.ContainerID { + if x != nil { + return x.ContainerId + } + return nil +} + +func (x *EACLTable) GetRecords() []*EACLRecord { + if x != nil { + return x.Records + } + return nil +} + +// BearerToken allows to attach signed Extended ACL rules to the request in +// `RequestMetaHeader`. If container's Basic ACL rules allow, the attached rule +// set will be checked instead of one attached to the container itself. Just +// like [JWT](https://jwt.io), it has a limited lifetime and scope, hence can be +// used in the similar use cases, like providing authorisation to externally +// authenticated party. +// +// BearerToken can be issued only by the container's owner and must be signed using +// the key associated with the container's `OwnerID`. 
+type BearerToken struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Bearer Token body + Body *BearerToken_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Signature of BearerToken body + Signature *refs.Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` +} + +func (x *BearerToken) Reset() { + *x = BearerToken{} + if protoimpl.UnsafeEnabled { + mi := &file_acl_grpc_types_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BearerToken) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BearerToken) ProtoMessage() {} + +func (x *BearerToken) ProtoReflect() protoreflect.Message { + mi := &file_acl_grpc_types_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BearerToken.ProtoReflect.Descriptor instead. +func (*BearerToken) Descriptor() ([]byte, []int) { + return file_acl_grpc_types_proto_rawDescGZIP(), []int{2} +} + +func (x *BearerToken) GetBody() *BearerToken_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *BearerToken) GetSignature() *refs.Signature { + if x != nil { + return x.Signature + } + return nil +} + +// Filter to check particular properties of the request or the object. +// +// The `value` field must be empty if `match_type` is an unary operator +// (e.g. `NOT_PRESENT`). If `match_type` field is numeric (e.g. `NUM_GT`), +// the `value` field must be a base-10 integer. +// +// By default `key` field refers to the corresponding object's `Attribute`. +// Some Object's header fields can also be accessed by adding `$Object:` +// prefix to the name. For such attributes, field 'match_type' must not be +// 'NOT_PRESENT'. 
Here is the list of fields available via this prefix: +// +// - $Object:version \ +// version +// - $Object:objectID \ +// object_id +// - $Object:containerID \ +// container_id +// - $Object:ownerID \ +// owner_id +// - $Object:creationEpoch \ +// creation_epoch +// - $Object:payloadLength \ +// payload_length +// - $Object:payloadHash \ +// payload_hash +// - $Object:objectType \ +// object_type +// - $Object:homomorphicHash \ +// homomorphic_hash +// +// Numeric `match_type` field can only be used with `$Object:creationEpoch` +// and `$Object:payloadLength` system attributes. +// +// Please note, that if request or response does not have object's headers of +// full object (Range, RangeHash, Search, Delete), it will not be possible to +// filter by object header fields or user attributes. From the well-known list +// only `$Object:objectID` and `$Object:containerID` will be available, as +// it's possible to take that information from the requested address. +type EACLRecord_Filter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Define if Object or Request header will be used + HeaderType HeaderType `protobuf:"varint,1,opt,name=header_type,json=headerType,proto3,enum=neo.fs.v2.acl.HeaderType" json:"header_type,omitempty"` + // Match operation type + MatchType MatchType `protobuf:"varint,2,opt,name=match_type,json=matchType,proto3,enum=neo.fs.v2.acl.MatchType" json:"match_type,omitempty"` + // Name of the Header to use + Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + // Expected Header Value or pattern to match + Value string `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *EACLRecord_Filter) Reset() { + *x = EACLRecord_Filter{} + if protoimpl.UnsafeEnabled { + mi := &file_acl_grpc_types_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EACLRecord_Filter) String() 
string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EACLRecord_Filter) ProtoMessage() {} + +func (x *EACLRecord_Filter) ProtoReflect() protoreflect.Message { + mi := &file_acl_grpc_types_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EACLRecord_Filter.ProtoReflect.Descriptor instead. +func (*EACLRecord_Filter) Descriptor() ([]byte, []int) { + return file_acl_grpc_types_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *EACLRecord_Filter) GetHeaderType() HeaderType { + if x != nil { + return x.HeaderType + } + return HeaderType_HEADER_UNSPECIFIED +} + +func (x *EACLRecord_Filter) GetMatchType() MatchType { + if x != nil { + return x.MatchType + } + return MatchType_MATCH_TYPE_UNSPECIFIED +} + +func (x *EACLRecord_Filter) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *EACLRecord_Filter) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +// Target to apply ACL rule. Can be a subject's role class or a list of public +// keys to match. 
+type EACLRecord_Target struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Target subject's role class + Role Role `protobuf:"varint,1,opt,name=role,proto3,enum=neo.fs.v2.acl.Role" json:"role,omitempty"` + // List of public keys to identify target subject + Keys [][]byte `protobuf:"bytes,2,rep,name=keys,proto3" json:"keys,omitempty"` +} + +func (x *EACLRecord_Target) Reset() { + *x = EACLRecord_Target{} + if protoimpl.UnsafeEnabled { + mi := &file_acl_grpc_types_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EACLRecord_Target) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EACLRecord_Target) ProtoMessage() {} + +func (x *EACLRecord_Target) ProtoReflect() protoreflect.Message { + mi := &file_acl_grpc_types_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EACLRecord_Target.ProtoReflect.Descriptor instead. +func (*EACLRecord_Target) Descriptor() ([]byte, []int) { + return file_acl_grpc_types_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *EACLRecord_Target) GetRole() Role { + if x != nil { + return x.Role + } + return Role_ROLE_UNSPECIFIED +} + +func (x *EACLRecord_Target) GetKeys() [][]byte { + if x != nil { + return x.Keys + } + return nil +} + +// Bearer Token body structure contains Extended ACL table issued by the container +// owner with additional information preventing token abuse. +type BearerToken_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Table of Extended ACL rules to use instead of the ones attached to the + // container. If it contains `container_id` field, bearer token is only + // valid for this specific container. 
Otherwise, any container of the same owner + // is allowed. + EaclTable *EACLTable `protobuf:"bytes,1,opt,name=eacl_table,json=eaclTable,proto3" json:"eacl_table,omitempty"` + // `OwnerID` defines to whom the token was issued. It must match the request + // originator's `OwnerID`. If empty, any token bearer will be accepted. + OwnerId *refs.OwnerID `protobuf:"bytes,2,opt,name=owner_id,json=ownerID,proto3" json:"owner_id,omitempty"` + // Token expiration and valid time period parameters + Lifetime *BearerToken_Body_TokenLifetime `protobuf:"bytes,3,opt,name=lifetime,proto3" json:"lifetime,omitempty"` + // Token issuer's user ID in NeoFS. It must equal to the related + // container's owner. + Issuer *refs.OwnerID `protobuf:"bytes,4,opt,name=issuer,proto3" json:"issuer,omitempty"` +} + +func (x *BearerToken_Body) Reset() { + *x = BearerToken_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_acl_grpc_types_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BearerToken_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BearerToken_Body) ProtoMessage() {} + +func (x *BearerToken_Body) ProtoReflect() protoreflect.Message { + mi := &file_acl_grpc_types_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BearerToken_Body.ProtoReflect.Descriptor instead. 
+func (*BearerToken_Body) Descriptor() ([]byte, []int) { + return file_acl_grpc_types_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *BearerToken_Body) GetEaclTable() *EACLTable { + if x != nil { + return x.EaclTable + } + return nil +} + +func (x *BearerToken_Body) GetOwnerId() *refs.OwnerID { + if x != nil { + return x.OwnerId + } + return nil +} + +func (x *BearerToken_Body) GetLifetime() *BearerToken_Body_TokenLifetime { + if x != nil { + return x.Lifetime + } + return nil +} + +func (x *BearerToken_Body) GetIssuer() *refs.OwnerID { + if x != nil { + return x.Issuer + } + return nil +} + +// Lifetime parameters of the token. Field names taken from +// [rfc7519](https://tools.ietf.org/html/rfc7519). +type BearerToken_Body_TokenLifetime struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Expiration Epoch + Exp uint64 `protobuf:"varint,1,opt,name=exp,proto3" json:"exp,omitempty"` + // Not valid before Epoch + Nbf uint64 `protobuf:"varint,2,opt,name=nbf,proto3" json:"nbf,omitempty"` + // Issued at Epoch + Iat uint64 `protobuf:"varint,3,opt,name=iat,proto3" json:"iat,omitempty"` +} + +func (x *BearerToken_Body_TokenLifetime) Reset() { + *x = BearerToken_Body_TokenLifetime{} + if protoimpl.UnsafeEnabled { + mi := &file_acl_grpc_types_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BearerToken_Body_TokenLifetime) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BearerToken_Body_TokenLifetime) ProtoMessage() {} + +func (x *BearerToken_Body_TokenLifetime) ProtoReflect() protoreflect.Message { + mi := &file_acl_grpc_types_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
BearerToken_Body_TokenLifetime.ProtoReflect.Descriptor instead. +func (*BearerToken_Body_TokenLifetime) Descriptor() ([]byte, []int) { + return file_acl_grpc_types_proto_rawDescGZIP(), []int{2, 0, 0} +} + +func (x *BearerToken_Body_TokenLifetime) GetExp() uint64 { + if x != nil { + return x.Exp + } + return 0 +} + +func (x *BearerToken_Body_TokenLifetime) GetNbf() uint64 { + if x != nil { + return x.Nbf + } + return 0 +} + +func (x *BearerToken_Body_TokenLifetime) GetIat() uint64 { + if x != nil { + return x.Iat + } + return 0 +} + +var File_acl_grpc_types_proto protoreflect.FileDescriptor + +var file_acl_grpc_types_proto_rawDesc = []byte{ + 0x0a, 0x14, 0x61, 0x63, 0x6c, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x61, 0x63, 0x6c, 0x1a, 0x15, 0x72, 0x65, 0x66, 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xda, 0x03, 0x0a, + 0x0a, 0x45, 0x41, 0x43, 0x4c, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x36, 0x0a, 0x09, 0x6f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, + 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, + 0x61, 0x63, 0x6c, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, + 0x61, 0x63, 0x6c, 0x2e, 0x45, 0x41, 0x43, 0x4c, 0x52, 0x65, 0x63, 0x6f, 
0x72, 0x64, 0x2e, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x12, 0x3a, + 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x63, 0x6c, 0x2e, + 0x45, 0x41, 0x43, 0x4c, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x1a, 0xa5, 0x01, 0x0a, 0x06, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x3a, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x6e, 0x65, 0x6f, + 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x37, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x09, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x1a, 0x45, 0x0a, 0x06, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a, 0x04, + 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x13, 0x2e, 0x6e, 0x65, 0x6f, + 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x52, 0x6f, 0x6c, 0x65, 0x52, + 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0c, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x22, 0xb3, 0x01, 0x0a, 0x09, 0x45, 0x41, + 0x43, 0x4c, 
0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x31, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, + 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3e, 0x0a, 0x0c, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, + 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x52, 0x0b, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, 0x33, 0x0a, 0x07, 0x72, 0x65, + 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6e, 0x65, + 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x45, 0x41, 0x43, 0x4c, + 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x07, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x22, + 0xb4, 0x03, 0x0a, 0x0b, 0x42, 0x65, 0x61, 0x72, 0x65, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, + 0x33, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x42, 0x65, + 0x61, 0x72, 0x65, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, + 0x62, 0x6f, 0x64, 0x79, 0x12, 0x37, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, + 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0xb6, 0x02, + 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x37, 0x0a, 0x0a, 0x65, 0x61, 0x63, 0x6c, 0x5f, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, + 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x45, 0x41, 0x43, 0x4c, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x52, 0x09, 0x65, 0x61, 0x63, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, + 0x32, 0x0a, 0x08, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, + 0x66, 0x73, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x52, 0x07, 0x6f, 0x77, 0x6e, 0x65, + 0x72, 0x49, 0x44, 0x12, 0x49, 0x0a, 0x08, 0x6c, 0x69, 0x66, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x42, 0x65, 0x61, 0x72, 0x65, 0x72, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x2e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x4c, 0x69, 0x66, 0x65, + 0x74, 0x69, 0x6d, 0x65, 0x52, 0x08, 0x6c, 0x69, 0x66, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x2f, + 0x0a, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, + 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x1a, + 0x45, 0x0a, 0x0d, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x4c, 0x69, 0x66, 0x65, 0x74, 0x69, 0x6d, 0x65, + 0x12, 0x10, 0x0a, 0x03, 0x65, 0x78, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x65, + 0x78, 0x70, 0x12, 0x10, 0x0a, 0x03, 0x6e, 0x62, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x03, 0x6e, 0x62, 0x66, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x03, 0x69, 0x61, 0x74, 0x2a, 0x3e, 0x0a, 0x04, 0x52, 0x6f, 0x6c, 0x65, 0x12, 0x14, + 0x0a, 0x10, 0x52, 0x4f, 0x4c, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, + 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x55, 0x53, 0x45, 0x52, 0x10, 0x01, 
0x12, 0x0a, + 0x0a, 0x06, 0x53, 0x59, 0x53, 0x54, 0x45, 0x4d, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x54, + 0x48, 0x45, 0x52, 0x53, 0x10, 0x03, 0x2a, 0x90, 0x01, 0x0a, 0x09, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x45, 0x51, 0x55, 0x41, 0x4c, + 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x4e, 0x4f, 0x54, + 0x5f, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x4f, 0x54, 0x5f, + 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x54, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x55, 0x4d, + 0x5f, 0x47, 0x54, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x55, 0x4d, 0x5f, 0x47, 0x45, 0x10, + 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x55, 0x4d, 0x5f, 0x4c, 0x54, 0x10, 0x06, 0x12, 0x0a, 0x0a, + 0x06, 0x4e, 0x55, 0x4d, 0x5f, 0x4c, 0x45, 0x10, 0x07, 0x2a, 0x7a, 0x0a, 0x09, 0x4f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x15, 0x4f, 0x50, 0x45, 0x52, 0x41, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x54, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x45, + 0x41, 0x44, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x50, 0x55, 0x54, 0x10, 0x03, 0x12, 0x0a, 0x0a, + 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x45, 0x41, + 0x52, 0x43, 0x48, 0x10, 0x05, 0x12, 0x0c, 0x0a, 0x08, 0x47, 0x45, 0x54, 0x52, 0x41, 0x4e, 0x47, + 0x45, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x47, 0x45, 0x54, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x48, + 0x41, 0x53, 0x48, 0x10, 0x07, 0x2a, 0x35, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x16, 0x0a, 0x12, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 
0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, + 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x45, 0x4e, 0x59, 0x10, 0x02, 0x2a, 0x4a, 0x0a, 0x0a, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x48, 0x45, + 0x41, 0x44, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0x01, 0x12, + 0x0a, 0x0a, 0x06, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x53, + 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x03, 0x42, 0x4d, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6e, 0x73, 0x70, 0x63, 0x63, 0x2d, 0x64, 0x65, 0x76, + 0x2f, 0x6e, 0x65, 0x6f, 0x66, 0x73, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, + 0x2f, 0x61, 0x63, 0x6c, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x3b, 0x61, 0x63, 0x6c, 0xaa, 0x02, 0x17, + 0x4e, 0x65, 0x6f, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x41, 0x50, 0x49, 0x2e, 0x41, 0x63, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_acl_grpc_types_proto_rawDescOnce sync.Once + file_acl_grpc_types_proto_rawDescData = file_acl_grpc_types_proto_rawDesc +) + +func file_acl_grpc_types_proto_rawDescGZIP() []byte { + file_acl_grpc_types_proto_rawDescOnce.Do(func() { + file_acl_grpc_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_acl_grpc_types_proto_rawDescData) + }) + return file_acl_grpc_types_proto_rawDescData +} + +var file_acl_grpc_types_proto_enumTypes = make([]protoimpl.EnumInfo, 5) +var file_acl_grpc_types_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_acl_grpc_types_proto_goTypes = []interface{}{ + (Role)(0), // 0: neo.fs.v2.acl.Role + (MatchType)(0), // 1: neo.fs.v2.acl.MatchType + (Operation)(0), // 2: neo.fs.v2.acl.Operation + (Action)(0), // 3: neo.fs.v2.acl.Action + (HeaderType)(0), // 4: neo.fs.v2.acl.HeaderType + 
(*EACLRecord)(nil), // 5: neo.fs.v2.acl.EACLRecord + (*EACLTable)(nil), // 6: neo.fs.v2.acl.EACLTable + (*BearerToken)(nil), // 7: neo.fs.v2.acl.BearerToken + (*EACLRecord_Filter)(nil), // 8: neo.fs.v2.acl.EACLRecord.Filter + (*EACLRecord_Target)(nil), // 9: neo.fs.v2.acl.EACLRecord.Target + (*BearerToken_Body)(nil), // 10: neo.fs.v2.acl.BearerToken.Body + (*BearerToken_Body_TokenLifetime)(nil), // 11: neo.fs.v2.acl.BearerToken.Body.TokenLifetime + (*refs.Version)(nil), // 12: neo.fs.v2.refs.Version + (*refs.ContainerID)(nil), // 13: neo.fs.v2.refs.ContainerID + (*refs.Signature)(nil), // 14: neo.fs.v2.refs.Signature + (*refs.OwnerID)(nil), // 15: neo.fs.v2.refs.OwnerID +} +var file_acl_grpc_types_proto_depIdxs = []int32{ + 2, // 0: neo.fs.v2.acl.EACLRecord.operation:type_name -> neo.fs.v2.acl.Operation + 3, // 1: neo.fs.v2.acl.EACLRecord.action:type_name -> neo.fs.v2.acl.Action + 8, // 2: neo.fs.v2.acl.EACLRecord.filters:type_name -> neo.fs.v2.acl.EACLRecord.Filter + 9, // 3: neo.fs.v2.acl.EACLRecord.targets:type_name -> neo.fs.v2.acl.EACLRecord.Target + 12, // 4: neo.fs.v2.acl.EACLTable.version:type_name -> neo.fs.v2.refs.Version + 13, // 5: neo.fs.v2.acl.EACLTable.container_id:type_name -> neo.fs.v2.refs.ContainerID + 5, // 6: neo.fs.v2.acl.EACLTable.records:type_name -> neo.fs.v2.acl.EACLRecord + 10, // 7: neo.fs.v2.acl.BearerToken.body:type_name -> neo.fs.v2.acl.BearerToken.Body + 14, // 8: neo.fs.v2.acl.BearerToken.signature:type_name -> neo.fs.v2.refs.Signature + 4, // 9: neo.fs.v2.acl.EACLRecord.Filter.header_type:type_name -> neo.fs.v2.acl.HeaderType + 1, // 10: neo.fs.v2.acl.EACLRecord.Filter.match_type:type_name -> neo.fs.v2.acl.MatchType + 0, // 11: neo.fs.v2.acl.EACLRecord.Target.role:type_name -> neo.fs.v2.acl.Role + 6, // 12: neo.fs.v2.acl.BearerToken.Body.eacl_table:type_name -> neo.fs.v2.acl.EACLTable + 15, // 13: neo.fs.v2.acl.BearerToken.Body.owner_id:type_name -> neo.fs.v2.refs.OwnerID + 11, // 14: 
neo.fs.v2.acl.BearerToken.Body.lifetime:type_name -> neo.fs.v2.acl.BearerToken.Body.TokenLifetime + 15, // 15: neo.fs.v2.acl.BearerToken.Body.issuer:type_name -> neo.fs.v2.refs.OwnerID + 16, // [16:16] is the sub-list for method output_type + 16, // [16:16] is the sub-list for method input_type + 16, // [16:16] is the sub-list for extension type_name + 16, // [16:16] is the sub-list for extension extendee + 0, // [0:16] is the sub-list for field type_name +} + +func init() { file_acl_grpc_types_proto_init() } +func file_acl_grpc_types_proto_init() { + if File_acl_grpc_types_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_acl_grpc_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EACLRecord); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_acl_grpc_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EACLTable); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_acl_grpc_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BearerToken); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_acl_grpc_types_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EACLRecord_Filter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_acl_grpc_types_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EACLRecord_Target); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_acl_grpc_types_proto_msgTypes[5].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*BearerToken_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_acl_grpc_types_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BearerToken_Body_TokenLifetime); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_acl_grpc_types_proto_rawDesc, + NumEnums: 5, + NumMessages: 7, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_acl_grpc_types_proto_goTypes, + DependencyIndexes: file_acl_grpc_types_proto_depIdxs, + EnumInfos: file_acl_grpc_types_proto_enumTypes, + MessageInfos: file_acl_grpc_types_proto_msgTypes, + }.Build() + File_acl_grpc_types_proto = out.File + file_acl_grpc_types_proto_rawDesc = nil + file_acl_grpc_types_proto_goTypes = nil + file_acl_grpc_types_proto_depIdxs = nil +} diff --git a/api/audit/types.pb.go b/api/audit/types.pb.go new file mode 100644 index 000000000..754764a02 --- /dev/null +++ b/api/audit/types.pb.go @@ -0,0 +1,312 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.32.0 +// protoc v4.25.1 +// source: audit/grpc/types.proto + +package audit + +import ( + reflect "reflect" + sync "sync" + + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// DataAuditResult keeps record of conducted Data Audits. 
The detailed report is +// generated separately. +type DataAuditResult struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Data Audit Result format version. Effectively, the version of API library + // used to report DataAuditResult structure. + Version *refs.Version `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Epoch number when the Data Audit was conducted + AuditEpoch uint64 `protobuf:"fixed64,2,opt,name=audit_epoch,json=auditEpoch,proto3" json:"audit_epoch,omitempty"` + // Container under audit + ContainerId *refs.ContainerID `protobuf:"bytes,3,opt,name=container_id,json=containerID,proto3" json:"container_id,omitempty"` + // Public key of the auditing InnerRing node in a binary format + PublicKey []byte `protobuf:"bytes,4,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` + // Shows if Data Audit process was complete in time or if it was cancelled + Complete bool `protobuf:"varint,5,opt,name=complete,proto3" json:"complete,omitempty"` + // Number of request done at PoR stage + Requests uint32 `protobuf:"varint,6,opt,name=requests,proto3" json:"requests,omitempty"` + // Number of retries done at PoR stage + Retries uint32 `protobuf:"varint,7,opt,name=retries,proto3" json:"retries,omitempty"` + // List of Storage Groups that passed audit PoR stage + PassSg []*refs.ObjectID `protobuf:"bytes,8,rep,name=pass_sg,json=passSG,proto3" json:"pass_sg,omitempty"` + // List of Storage Groups that failed audit PoR stage + FailSg []*refs.ObjectID `protobuf:"bytes,9,rep,name=fail_sg,json=failSG,proto3" json:"fail_sg,omitempty"` + // Number of sampled objects under the audit placed in an optimal way according to + // the containers placement policy when checking PoP + Hit uint32 `protobuf:"varint,10,opt,name=hit,proto3" json:"hit,omitempty"` + // Number of sampled objects under the audit placed in suboptimal way according to + // the containers placement policy, 
but still at a satisfactory level when + // checking PoP + Miss uint32 `protobuf:"varint,11,opt,name=miss,proto3" json:"miss,omitempty"` + // Number of sampled objects under the audit stored inconsistently with the + // placement policy or not found at all when checking PoP + Fail uint32 `protobuf:"varint,12,opt,name=fail,proto3" json:"fail,omitempty"` + // List of storage node public keys that passed at least one PDP + PassNodes [][]byte `protobuf:"bytes,13,rep,name=pass_nodes,json=passNodes,proto3" json:"pass_nodes,omitempty"` + // List of storage node public keys that failed at least one PDP + FailNodes [][]byte `protobuf:"bytes,14,rep,name=fail_nodes,json=failNodes,proto3" json:"fail_nodes,omitempty"` +} + +func (x *DataAuditResult) Reset() { + *x = DataAuditResult{} + if protoimpl.UnsafeEnabled { + mi := &file_audit_grpc_types_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DataAuditResult) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DataAuditResult) ProtoMessage() {} + +func (x *DataAuditResult) ProtoReflect() protoreflect.Message { + mi := &file_audit_grpc_types_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DataAuditResult.ProtoReflect.Descriptor instead. 
+func (*DataAuditResult) Descriptor() ([]byte, []int) { + return file_audit_grpc_types_proto_rawDescGZIP(), []int{0} +} + +func (x *DataAuditResult) GetVersion() *refs.Version { + if x != nil { + return x.Version + } + return nil +} + +func (x *DataAuditResult) GetAuditEpoch() uint64 { + if x != nil { + return x.AuditEpoch + } + return 0 +} + +func (x *DataAuditResult) GetContainerId() *refs.ContainerID { + if x != nil { + return x.ContainerId + } + return nil +} + +func (x *DataAuditResult) GetPublicKey() []byte { + if x != nil { + return x.PublicKey + } + return nil +} + +func (x *DataAuditResult) GetComplete() bool { + if x != nil { + return x.Complete + } + return false +} + +func (x *DataAuditResult) GetRequests() uint32 { + if x != nil { + return x.Requests + } + return 0 +} + +func (x *DataAuditResult) GetRetries() uint32 { + if x != nil { + return x.Retries + } + return 0 +} + +func (x *DataAuditResult) GetPassSg() []*refs.ObjectID { + if x != nil { + return x.PassSg + } + return nil +} + +func (x *DataAuditResult) GetFailSg() []*refs.ObjectID { + if x != nil { + return x.FailSg + } + return nil +} + +func (x *DataAuditResult) GetHit() uint32 { + if x != nil { + return x.Hit + } + return 0 +} + +func (x *DataAuditResult) GetMiss() uint32 { + if x != nil { + return x.Miss + } + return 0 +} + +func (x *DataAuditResult) GetFail() uint32 { + if x != nil { + return x.Fail + } + return 0 +} + +func (x *DataAuditResult) GetPassNodes() [][]byte { + if x != nil { + return x.PassNodes + } + return nil +} + +func (x *DataAuditResult) GetFailNodes() [][]byte { + if x != nil { + return x.FailNodes + } + return nil +} + +var File_audit_grpc_types_proto protoreflect.FileDescriptor + +var file_audit_grpc_types_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x61, 0x75, 0x64, 0x69, 0x74, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, + 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x75, 0x64, 
0x69, 0x74, 0x1a, 0x15, 0x72, 0x65, 0x66, 0x73, 0x2f, + 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0xf4, 0x03, 0x0a, 0x0f, 0x44, 0x61, 0x74, 0x61, 0x41, 0x75, 0x64, 0x69, 0x74, 0x52, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x12, 0x31, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x75, 0x64, 0x69, 0x74, + 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0a, 0x61, 0x75, + 0x64, 0x69, 0x74, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x3e, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, + 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x52, 0x0b, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, 0x75, + 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6d, 0x70, 0x6c, + 0x65, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x63, 0x6f, 0x6d, 0x70, 0x6c, + 0x65, 0x74, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, + 0x18, 0x0a, 0x07, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x07, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x07, 0x70, 0x61, 0x73, + 0x73, 0x5f, 0x73, 0x67, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 
0x6e, 0x65, 0x6f, + 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x49, 0x44, 0x52, 0x06, 0x70, 0x61, 0x73, 0x73, 0x53, 0x47, 0x12, 0x31, 0x0a, 0x07, + 0x66, 0x61, 0x69, 0x6c, 0x5f, 0x73, 0x67, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, + 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x44, 0x52, 0x06, 0x66, 0x61, 0x69, 0x6c, 0x53, 0x47, 0x12, + 0x10, 0x0a, 0x03, 0x68, 0x69, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x68, 0x69, + 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x69, 0x73, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x04, 0x6d, 0x69, 0x73, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x61, 0x69, 0x6c, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x04, 0x66, 0x61, 0x69, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x73, + 0x73, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x09, 0x70, + 0x61, 0x73, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x61, 0x69, 0x6c, + 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x09, 0x66, 0x61, + 0x69, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x42, 0x53, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6e, 0x73, 0x70, 0x63, 0x63, 0x2d, 0x64, 0x65, 0x76, 0x2f, + 0x6e, 0x65, 0x6f, 0x66, 0x73, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, + 0x61, 0x75, 0x64, 0x69, 0x74, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x3b, 0x61, 0x75, 0x64, 0x69, 0x74, + 0xaa, 0x02, 0x19, 0x4e, 0x65, 0x6f, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x41, 0x50, 0x49, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_audit_grpc_types_proto_rawDescOnce sync.Once + file_audit_grpc_types_proto_rawDescData = file_audit_grpc_types_proto_rawDesc +) + +func 
file_audit_grpc_types_proto_rawDescGZIP() []byte { + file_audit_grpc_types_proto_rawDescOnce.Do(func() { + file_audit_grpc_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_audit_grpc_types_proto_rawDescData) + }) + return file_audit_grpc_types_proto_rawDescData +} + +var file_audit_grpc_types_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_audit_grpc_types_proto_goTypes = []interface{}{ + (*DataAuditResult)(nil), // 0: neo.fs.v2.audit.DataAuditResult + (*refs.Version)(nil), // 1: neo.fs.v2.refs.Version + (*refs.ContainerID)(nil), // 2: neo.fs.v2.refs.ContainerID + (*refs.ObjectID)(nil), // 3: neo.fs.v2.refs.ObjectID +} +var file_audit_grpc_types_proto_depIdxs = []int32{ + 1, // 0: neo.fs.v2.audit.DataAuditResult.version:type_name -> neo.fs.v2.refs.Version + 2, // 1: neo.fs.v2.audit.DataAuditResult.container_id:type_name -> neo.fs.v2.refs.ContainerID + 3, // 2: neo.fs.v2.audit.DataAuditResult.pass_sg:type_name -> neo.fs.v2.refs.ObjectID + 3, // 3: neo.fs.v2.audit.DataAuditResult.fail_sg:type_name -> neo.fs.v2.refs.ObjectID + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_audit_grpc_types_proto_init() } +func file_audit_grpc_types_proto_init() { + if File_audit_grpc_types_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_audit_grpc_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DataAuditResult); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_audit_grpc_types_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + 
NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_audit_grpc_types_proto_goTypes, + DependencyIndexes: file_audit_grpc_types_proto_depIdxs, + MessageInfos: file_audit_grpc_types_proto_msgTypes, + }.Build() + File_audit_grpc_types_proto = out.File + file_audit_grpc_types_proto_rawDesc = nil + file_audit_grpc_types_proto_goTypes = nil + file_audit_grpc_types_proto_depIdxs = nil +} diff --git a/api/container/encoding.go b/api/container/encoding.go new file mode 100644 index 000000000..763212ec5 --- /dev/null +++ b/api/container/encoding.go @@ -0,0 +1,337 @@ +package container + +import "github.com/nspcc-dev/neofs-sdk-go/internal/proto" + +const ( + _ = iota + fieldContainerAttrKey + fieldContainerAttrVal +) + +func (x *Container_Attribute) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeBytes(fieldContainerAttrKey, x.Key) + + proto.SizeBytes(fieldContainerAttrVal, x.Value) + } + return sz +} + +func (x *Container_Attribute) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalBytes(b, fieldContainerAttrKey, x.Key) + proto.MarshalBytes(b[off:], fieldContainerAttrVal, x.Value) + } +} + +const ( + _ = iota + fieldContainerVersion + fieldContainerOwner + fieldContainerNonce + fieldContainerBasicACL + fieldContainerAttributes + fieldContainerPolicy +) + +func (x *Container) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldContainerVersion, x.Version) + + proto.SizeNested(fieldContainerOwner, x.OwnerId) + + proto.SizeBytes(fieldContainerNonce, x.Nonce) + + proto.SizeVarint(fieldContainerBasicACL, x.BasicAcl) + + proto.SizeNested(fieldContainerPolicy, x.PlacementPolicy) + for i := range x.Attributes { + sz += proto.SizeNested(fieldContainerAttributes, x.Attributes[i]) + } + } + return sz +} + +func (x *Container) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalNested(b, fieldContainerVersion, x.Version) + off += proto.MarshalNested(b[off:], fieldContainerOwner, x.OwnerId) + off += 
proto.MarshalBytes(b[off:], fieldContainerNonce, x.Nonce) + off += proto.MarshalVarint(b[off:], fieldContainerBasicACL, x.BasicAcl) + for i := range x.Attributes { + off += proto.MarshalNested(b[off:], fieldContainerAttributes, x.Attributes[i]) + } + proto.MarshalNested(b[off:], fieldContainerPolicy, x.PlacementPolicy) + } +} + +const ( + _ = iota + fieldPutReqContainer + fieldPutReqSignature +) + +func (x *PutRequest_Body) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldPutReqContainer, x.Container) + + proto.SizeNested(fieldPutReqSignature, x.Signature) + } + return sz +} + +func (x *PutRequest_Body) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalNested(b, fieldPutReqContainer, x.Container) + proto.MarshalNested(b[off:], fieldPutReqSignature, x.Signature) + } +} + +const ( + _ = iota + fieldPutRespID +) + +func (x *PutResponse_Body) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldPutRespID, x.ContainerId) + } + return sz +} + +func (x *PutResponse_Body) MarshalStable(b []byte) { + if x != nil { + proto.MarshalNested(b, fieldPutRespID, x.ContainerId) + } +} + +const ( + _ = iota + fieldDeleteReqContainer + fieldDeleteReqSignature +) + +func (x *DeleteRequest_Body) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldDeleteReqContainer, x.ContainerId) + + proto.SizeNested(fieldDeleteReqSignature, x.Signature) + } + return sz +} + +func (x *DeleteRequest_Body) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalNested(b, fieldDeleteReqContainer, x.ContainerId) + proto.MarshalNested(b[off:], fieldDeleteReqSignature, x.Signature) + } +} + +func (x *DeleteResponse_Body) MarshaledSize() int { return 0 } +func (x *DeleteResponse_Body) MarshalStable([]byte) {} + +const ( + _ = iota + fieldGetReqContainer +) + +func (x *GetRequest_Body) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldGetReqContainer, x.ContainerId) + } + 
return sz +} + +func (x *GetRequest_Body) MarshalStable(b []byte) { + if x != nil { + proto.MarshalNested(b, fieldGetReqContainer, x.ContainerId) + } +} + +const ( + _ = iota + fieldGetRespContainer + fieldGetRespSignature + fieldGetRespSession +) + +func (x *GetResponse_Body) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldGetRespContainer, x.Container) + + proto.SizeNested(fieldGetRespSignature, x.Signature) + + proto.SizeNested(fieldGetRespSession, x.SessionToken) + } + return sz +} + +func (x *GetResponse_Body) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalNested(b, fieldGetRespContainer, x.Container) + off += proto.MarshalNested(b[off:], fieldGetRespSignature, x.Signature) + proto.MarshalNested(b[off:], fieldGetRespSession, x.SessionToken) + } +} + +const ( + _ = iota + fieldListReqOwner +) + +func (x *ListRequest_Body) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldListReqOwner, x.OwnerId) + } + return sz +} + +func (x *ListRequest_Body) MarshalStable(b []byte) { + if x != nil { + proto.MarshalNested(b, fieldListReqOwner, x.OwnerId) + } +} + +const ( + _ = iota + fieldListRespIDs +) + +func (x *ListResponse_Body) MarshaledSize() int { + var sz int + if x != nil { + for i := range x.ContainerIds { + sz += proto.SizeNested(fieldListRespIDs, x.ContainerIds[i]) + } + } + return sz +} + +func (x *ListResponse_Body) MarshalStable(b []byte) { + if x != nil { + var off int + for i := range x.ContainerIds { + off += proto.MarshalNested(b[off:], fieldListRespIDs, x.ContainerIds[i]) + } + } +} + +const ( + _ = iota + fieldSetEACLReqTable + fieldSetEACLReqSignature +) + +func (x *SetExtendedACLRequest_Body) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldSetEACLReqTable, x.Eacl) + + proto.SizeNested(fieldSetEACLReqSignature, x.Signature) + } + return sz +} + +func (x *SetExtendedACLRequest_Body) MarshalStable(b []byte) { + if x != nil { + off := 
proto.MarshalNested(b, fieldSetEACLReqTable, x.Eacl) + proto.MarshalNested(b[off:], fieldSetEACLReqSignature, x.Signature) + } +} + +func (x *SetExtendedACLResponse_Body) MarshaledSize() int { return 0 } +func (x *SetExtendedACLResponse_Body) MarshalStable([]byte) {} + +const ( + _ = iota + fieldGetEACLReqContainer +) + +func (x *GetExtendedACLRequest_Body) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldGetEACLReqContainer, x.ContainerId) + } + return sz +} + +func (x *GetExtendedACLRequest_Body) MarshalStable(b []byte) { + if x != nil { + proto.MarshalNested(b, fieldGetEACLReqContainer, x.ContainerId) + } +} + +const ( + _ = iota + fieldGetEACLRespTable + fieldGetEACLRespSignature + fieldGetEACLRespSession +) + +func (x *GetExtendedACLResponse_Body) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldGetEACLRespTable, x.Eacl) + + proto.SizeNested(fieldGetEACLRespSignature, x.Signature) + + proto.SizeNested(fieldGetEACLRespSession, x.SessionToken) + } + return sz +} + +func (x *GetExtendedACLResponse_Body) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalNested(b, fieldGetEACLRespTable, x.Eacl) + off += proto.MarshalNested(b[off:], fieldGetEACLRespSignature, x.Signature) + proto.MarshalNested(b[off:], fieldGetEACLRespSession, x.SessionToken) + } +} + +const ( + _ = iota + fieldUsedSpaceEpoch + fieldUsedSpaceContainer + fieldUsedSpaceValue +) + +func (x *AnnounceUsedSpaceRequest_Body_Announcement) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeVarint(fieldUsedSpaceEpoch, x.Epoch) + + proto.SizeNested(fieldUsedSpaceContainer, x.ContainerId) + + proto.SizeVarint(fieldUsedSpaceValue, x.UsedSpace) + } + return sz +} + +func (x *AnnounceUsedSpaceRequest_Body_Announcement) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalVarint(b, fieldUsedSpaceEpoch, x.Epoch) + off += proto.MarshalNested(b[off:], fieldUsedSpaceContainer, x.ContainerId) + 
proto.MarshalVarint(b[off:], fieldUsedSpaceValue, x.UsedSpace) + } +} + +const ( + _ = iota + fieldAnnounceReqList +) + +func (x *AnnounceUsedSpaceRequest_Body) MarshaledSize() int { + var sz int + if x != nil { + for i := range x.Announcements { + sz += proto.SizeNested(fieldAnnounceReqList, x.Announcements[i]) + } + } + return sz +} + +func (x *AnnounceUsedSpaceRequest_Body) MarshalStable(b []byte) { + if x != nil { + var off int + for i := range x.Announcements { + off += proto.MarshalNested(b[off:], fieldAnnounceReqList, x.Announcements[i]) + } + } +} + +func (x *AnnounceUsedSpaceResponse_Body) MarshaledSize() int { return 0 } +func (x *AnnounceUsedSpaceResponse_Body) MarshalStable([]byte) {} diff --git a/api/container/encoding_test.go b/api/container/encoding_test.go new file mode 100644 index 000000000..aa6a5eab4 --- /dev/null +++ b/api/container/encoding_test.go @@ -0,0 +1,422 @@ +package container_test + +import ( + "testing" + + "github.com/nspcc-dev/neofs-sdk-go/api/acl" + "github.com/nspcc-dev/neofs-sdk-go/api/container" + "github.com/nspcc-dev/neofs-sdk-go/api/netmap" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/session" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" +) + +func TestPutRequest_Body(t *testing.T) { + v := &container.PutRequest_Body{ + Container: &container.Container{ + Version: &refs.Version{Major: 1, Minor: 2}, + OwnerId: &refs.OwnerID{Value: []byte("any_owner")}, + Nonce: []byte("any_nonce"), + BasicAcl: 3, + Attributes: []*container.Container_Attribute{ + {Key: "attr_key1", Value: "attr_val1"}, + {Key: "attr_key2", Value: "attr_val2"}, + }, + PlacementPolicy: &netmap.PlacementPolicy{ + Replicas: []*netmap.Replica{ + {Count: 4, Selector: "selector1"}, + {Count: 5, Selector: "selector2"}, + }, + ContainerBackupFactor: 6, + Selectors: []*netmap.Selector{ + {Name: "selector3", Count: 7, Clause: 8, Attribute: "attr1", Filter: "filter1"}, + {Name: "selector4", Count: 9, 
Clause: 10, Attribute: "attr2", Filter: "filter2"}, + }, + Filters: []*netmap.Filter{ + {Name: "filter3", Key: "filter_key1", Op: 11, Value: "filter_val1", Filters: []*netmap.Filter{ + {Name: "filter4", Key: "filter_key2", Op: 12, Value: "filter_val2"}, + {Name: "filter5", Key: "filter_key3", Op: 13, Value: "filter_val3"}, + }}, + {Name: "filter6", Key: "filter_key4", Op: 14, Value: "filter_val4", Filters: []*netmap.Filter{ + {Name: "filter7", Key: "filter_key5", Op: 15, Value: "filter_val5"}, + {Name: "filter8", Key: "filter_key6", Op: 16, Value: "filter_val6"}, + }}, + }, + SubnetId: &refs.SubnetID{Value: 17}, + }, + }, + Signature: &refs.SignatureRFC6979{Key: []byte("any_pubkey"), Sign: []byte("any_signature")}, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res container.PutRequest_Body + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.Container, res.Container) + require.Equal(t, v.Signature, res.Signature) +} + +func TestPutResponse_Body(t *testing.T) { + v := &container.PutResponse_Body{ + ContainerId: &refs.ContainerID{Value: []byte("any_container")}, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res container.PutResponse_Body + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.ContainerId, res.ContainerId) +} + +func TestDeleteRequest_Body(t *testing.T) { + v := &container.DeleteRequest_Body{ + ContainerId: &refs.ContainerID{Value: []byte("any_container")}, + Signature: &refs.SignatureRFC6979{Key: []byte("any_pubkey"), Sign: []byte("any_signature")}, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res container.DeleteRequest_Body + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.ContainerId, 
res.ContainerId) + require.Equal(t, v.Signature, res.Signature) +} + +func TestDeleteResponse_Body(t *testing.T) { + var v container.DeleteResponse_Body + require.Zero(t, v.MarshaledSize()) + require.NotPanics(t, func() { v.MarshalStable(nil) }) + b := []byte("not_a_protobuf") + v.MarshalStable(b) + require.EqualValues(t, "not_a_protobuf", b) +} + +func TestGetRequest_Body(t *testing.T) { + v := &container.GetRequest_Body{ + ContainerId: &refs.ContainerID{Value: []byte("any_container")}, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res container.GetRequest_Body + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.ContainerId, res.ContainerId) +} + +func TestGetResponse_Body(t *testing.T) { + v := &container.GetResponse_Body{ + Container: &container.Container{ + Version: &refs.Version{Major: 1, Minor: 2}, + OwnerId: &refs.OwnerID{Value: []byte("any_owner")}, + Nonce: []byte("any_nonce"), + BasicAcl: 3, + Attributes: []*container.Container_Attribute{ + {Key: "attr_key1", Value: "attr_val1"}, + {Key: "attr_key2", Value: "attr_val2"}, + }, + PlacementPolicy: &netmap.PlacementPolicy{ + Replicas: []*netmap.Replica{ + {Count: 4, Selector: "selector1"}, + {Count: 5, Selector: "selector2"}, + }, + ContainerBackupFactor: 6, + Selectors: []*netmap.Selector{ + {Name: "selector3", Count: 7, Clause: 8, Attribute: "attr1", Filter: "filter1"}, + {Name: "selector4", Count: 9, Clause: 10, Attribute: "attr2", Filter: "filter2"}, + }, + Filters: []*netmap.Filter{ + {Name: "filter3", Key: "filter_key1", Op: 11, Value: "filter_val1", Filters: []*netmap.Filter{ + {Name: "filter4", Key: "filter_key2", Op: 12, Value: "filter_val2"}, + {Name: "filter5", Key: "filter_key3", Op: 13, Value: "filter_val3"}, + }}, + {Name: "filter6", Key: "filter_key4", Op: 14, Value: "filter_val4", Filters: []*netmap.Filter{ + {Name: "filter7", Key: "filter_key5", Op: 15, Value: 
"filter_val5"}, + {Name: "filter8", Key: "filter_key6", Op: 16, Value: "filter_val6"}, + }}, + }, + SubnetId: &refs.SubnetID{Value: 17}, + }, + }, + Signature: &refs.SignatureRFC6979{Key: []byte("any_pubkey"), Sign: []byte("any_signature")}, + SessionToken: &session.SessionToken{ + Body: &session.SessionToken_Body{ + Id: []byte("any_ID"), + OwnerId: &refs.OwnerID{Value: []byte("any_owner")}, + Lifetime: &session.SessionToken_Body_TokenLifetime{Exp: 18, Nbf: 19, Iat: 20}, + SessionKey: []byte("any_key"), + }, + Signature: &refs.Signature{Key: []byte("any_key"), Sign: []byte("any_signature"), Scheme: 21}, + }, + } + + testWithSessionContext := func(setCtx func(*session.SessionToken_Body)) { + setCtx(v.SessionToken.Body) + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res container.GetResponse_Body + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.Container, res.Container) + require.Equal(t, v.Signature, res.Signature) + require.Equal(t, v.SessionToken, res.SessionToken) + } + + testWithSessionContext(func(body *session.SessionToken_Body) { body.Context = nil }) + testWithSessionContext(func(body *session.SessionToken_Body) { + body.Context = &session.SessionToken_Body_Container{ + Container: &session.ContainerSessionContext{ + Verb: 1, Wildcard: true, + ContainerId: &refs.ContainerID{Value: []byte("any_container")}, + }, + } + }) + testWithSessionContext(func(body *session.SessionToken_Body) { + body.Context = &session.SessionToken_Body_Object{ + Object: &session.ObjectSessionContext{ + Verb: 1, + Target: &session.ObjectSessionContext_Target{ + Container: &refs.ContainerID{Value: []byte("any_container")}, + Objects: []*refs.ObjectID{ + {Value: []byte("any_object1")}, + {Value: []byte("any_object2")}, + }, + }, + }, + } + }) +} + +func TestListRequest_Body(t *testing.T) { + v := &container.ListRequest_Body{ + OwnerId: &refs.OwnerID{Value: 
[]byte("any_owner")}, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res container.ListRequest_Body + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.OwnerId, res.OwnerId) +} + +func TestListResponse_Body(t *testing.T) { + v := &container.ListResponse_Body{ + ContainerIds: []*refs.ContainerID{ + {Value: []byte("any_container1")}, + {Value: []byte("any_container2")}, + }, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res container.ListResponse_Body + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.ContainerIds, res.ContainerIds) +} + +func TestSetExtendedACLRequest_Body(t *testing.T) { + v := &container.SetExtendedACLRequest_Body{ + Eacl: &acl.EACLTable{ + Version: &refs.Version{Major: 123, Minor: 456}, + ContainerId: &refs.ContainerID{Value: []byte("any_container")}, + Records: []*acl.EACLRecord{ + { + Operation: 1, Action: 2, + Filters: []*acl.EACLRecord_Filter{ + {HeaderType: 3, MatchType: 4, Key: "key1", Value: "val1"}, + {HeaderType: 5, MatchType: 6, Key: "key2", Value: "val2"}, + }, + Targets: []*acl.EACLRecord_Target{ + {Role: 7, Keys: [][]byte{{0}, {1}}}, + {Role: 8, Keys: [][]byte{{2}, {3}}}, + }, + }, + { + Operation: 9, Action: 10, + Filters: []*acl.EACLRecord_Filter{ + {HeaderType: 11, MatchType: 12, Key: "key3", Value: "val3"}, + {HeaderType: 13, MatchType: 14, Key: "key4", Value: "val4"}, + }, + Targets: []*acl.EACLRecord_Target{ + {Role: 15, Keys: [][]byte{{4}, {5}}}, + {Role: 16, Keys: [][]byte{{6}, {7}}}, + }, + }, + }, + }, + Signature: &refs.SignatureRFC6979{Key: []byte("any_pubkey"), Sign: []byte("any_signature")}, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res container.SetExtendedACLRequest_Body + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + 
require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.Eacl, res.Eacl) + require.Equal(t, v.Signature, res.Signature) +} + +func TestSetExtendedACLResponse_Body(t *testing.T) { + var v container.SetExtendedACLResponse_Body + require.Zero(t, v.MarshaledSize()) + require.NotPanics(t, func() { v.MarshalStable(nil) }) + b := []byte("not_a_protobuf") + v.MarshalStable(b) + require.EqualValues(t, "not_a_protobuf", b) +} + +func TestGetExtendedACLRequest_Body(t *testing.T) { + v := &container.GetExtendedACLRequest_Body{ + ContainerId: &refs.ContainerID{Value: []byte("any_container")}, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res container.GetExtendedACLRequest_Body + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.ContainerId, res.ContainerId) +} + +func TestGetExtendedACLResponse(t *testing.T) { + v := &container.GetExtendedACLResponse_Body{ + Eacl: &acl.EACLTable{ + Version: &refs.Version{Major: 123, Minor: 456}, + ContainerId: &refs.ContainerID{Value: []byte("any_container")}, + Records: []*acl.EACLRecord{ + { + Operation: 1, Action: 2, + Filters: []*acl.EACLRecord_Filter{ + {HeaderType: 3, MatchType: 4, Key: "key1", Value: "val1"}, + {HeaderType: 5, MatchType: 6, Key: "key2", Value: "val2"}, + }, + Targets: []*acl.EACLRecord_Target{ + {Role: 7, Keys: [][]byte{{0}, {1}}}, + {Role: 8, Keys: [][]byte{{2}, {3}}}, + }, + }, + { + Operation: 9, Action: 10, + Filters: []*acl.EACLRecord_Filter{ + {HeaderType: 11, MatchType: 12, Key: "key3", Value: "val3"}, + {HeaderType: 13, MatchType: 14, Key: "key4", Value: "val4"}, + }, + Targets: []*acl.EACLRecord_Target{ + {Role: 15, Keys: [][]byte{{4}, {5}}}, + {Role: 16, Keys: [][]byte{{6}, {7}}}, + }, + }, + }, + }, + Signature: &refs.SignatureRFC6979{Key: []byte("any_pubkey"), Sign: []byte("any_signature")}, + SessionToken: &session.SessionToken{ + Body: &session.SessionToken_Body{ + Id: 
[]byte("any_ID"), + OwnerId: &refs.OwnerID{Value: []byte("any_owner")}, + Lifetime: &session.SessionToken_Body_TokenLifetime{Exp: 18, Nbf: 19, Iat: 20}, + SessionKey: []byte("any_key"), + }, + Signature: &refs.Signature{Key: []byte("any_key"), Sign: []byte("any_signature"), Scheme: 21}, + }, + } + + testWithSessionContext := func(setCtx func(*session.SessionToken_Body)) { + setCtx(v.SessionToken.Body) + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res container.GetExtendedACLResponse_Body + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.Eacl, res.Eacl) + require.Equal(t, v.Signature, res.Signature) + require.Equal(t, v.SessionToken, res.SessionToken) + } + + testWithSessionContext(func(body *session.SessionToken_Body) { body.Context = nil }) + testWithSessionContext(func(body *session.SessionToken_Body) { + body.Context = &session.SessionToken_Body_Container{ + Container: &session.ContainerSessionContext{ + Verb: 1, Wildcard: true, + ContainerId: &refs.ContainerID{Value: []byte("any_container")}, + }, + } + }) + testWithSessionContext(func(body *session.SessionToken_Body) { + body.Context = &session.SessionToken_Body_Object{ + Object: &session.ObjectSessionContext{ + Verb: 1, + Target: &session.ObjectSessionContext_Target{ + Container: &refs.ContainerID{Value: []byte("any_container")}, + Objects: []*refs.ObjectID{ + {Value: []byte("any_object1")}, + {Value: []byte("any_object2")}, + }, + }, + }, + } + }) +} + +func TestAnnounceUsedSpaceRequest_Body(t *testing.T) { + v := &container.AnnounceUsedSpaceRequest_Body{ + Announcements: []*container.AnnounceUsedSpaceRequest_Body_Announcement{ + {Epoch: 1, ContainerId: &refs.ContainerID{Value: []byte("any_container1")}, UsedSpace: 2}, + {Epoch: 3, ContainerId: &refs.ContainerID{Value: []byte("any_container2")}, UsedSpace: 4}, + }, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + 
var res container.AnnounceUsedSpaceRequest_Body + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.Announcements, res.Announcements) +} + +func TestAnnounceUsedSpaceResponse_Body(t *testing.T) { + var v container.AnnounceUsedSpaceResponse_Body + require.Zero(t, v.MarshaledSize()) + require.NotPanics(t, func() { v.MarshalStable(nil) }) + b := []byte("not_a_protobuf") + v.MarshalStable(b) + require.EqualValues(t, "not_a_protobuf", b) +} diff --git a/api/container/service.pb.go b/api/container/service.pb.go new file mode 100644 index 000000000..946c5c549 --- /dev/null +++ b/api/container/service.pb.go @@ -0,0 +1,2685 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.32.0 +// protoc v4.25.1 +// source: container/grpc/service.proto + +package container + +import ( + reflect "reflect" + sync "sync" + + "github.com/nspcc-dev/neofs-sdk-go/api/acl" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/session" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// New NeoFS Container creation request +type PutRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of container put request message. + Body *PutRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries request meta information. Header data is used only to regulate + // message transport and does not affect request execution. 
+ MetaHeader *session.RequestMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries request verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. + VerifyHeader *session.RequestVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *PutRequest) Reset() { + *x = PutRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_container_grpc_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PutRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PutRequest) ProtoMessage() {} + +func (x *PutRequest) ProtoReflect() protoreflect.Message { + mi := &file_container_grpc_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PutRequest.ProtoReflect.Descriptor instead. +func (*PutRequest) Descriptor() ([]byte, []int) { + return file_container_grpc_service_proto_rawDescGZIP(), []int{0} +} + +func (x *PutRequest) GetBody() *PutRequest_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *PutRequest) GetMetaHeader() *session.RequestMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *PutRequest) GetVerifyHeader() *session.RequestVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// New NeoFS Container creation response +type PutResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of container put response message. 
+ Body *PutResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries response meta information. Header data is used only to regulate + // message transport and does not affect request execution. + MetaHeader *session.ResponseMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries response verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. + VerifyHeader *session.ResponseVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *PutResponse) Reset() { + *x = PutResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_container_grpc_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PutResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PutResponse) ProtoMessage() {} + +func (x *PutResponse) ProtoReflect() protoreflect.Message { + mi := &file_container_grpc_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PutResponse.ProtoReflect.Descriptor instead. 
+func (*PutResponse) Descriptor() ([]byte, []int) { + return file_container_grpc_service_proto_rawDescGZIP(), []int{1} +} + +func (x *PutResponse) GetBody() *PutResponse_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *PutResponse) GetMetaHeader() *session.ResponseMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *PutResponse) GetVerifyHeader() *session.ResponseVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// Container removal request +type DeleteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of container delete request message. + Body *DeleteRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries request meta information. Header data is used only to regulate + // message transport and does not affect request execution. + MetaHeader *session.RequestMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries request verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. 
+ VerifyHeader *session.RequestVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *DeleteRequest) Reset() { + *x = DeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_container_grpc_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteRequest) ProtoMessage() {} + +func (x *DeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_container_grpc_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteRequest.ProtoReflect.Descriptor instead. +func (*DeleteRequest) Descriptor() ([]byte, []int) { + return file_container_grpc_service_proto_rawDescGZIP(), []int{2} +} + +func (x *DeleteRequest) GetBody() *DeleteRequest_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *DeleteRequest) GetMetaHeader() *session.RequestMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *DeleteRequest) GetVerifyHeader() *session.RequestVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// `DeleteResponse` has an empty body because delete operation is asynchronous +// and done via consensus in Inner Ring nodes. +type DeleteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of container delete response message. + Body *DeleteResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries response meta information. Header data is used only to regulate + // message transport and does not affect request execution. 
+ MetaHeader *session.ResponseMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries response verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. + VerifyHeader *session.ResponseVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *DeleteResponse) Reset() { + *x = DeleteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_container_grpc_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteResponse) ProtoMessage() {} + +func (x *DeleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_container_grpc_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteResponse.ProtoReflect.Descriptor instead. +func (*DeleteResponse) Descriptor() ([]byte, []int) { + return file_container_grpc_service_proto_rawDescGZIP(), []int{3} +} + +func (x *DeleteResponse) GetBody() *DeleteResponse_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *DeleteResponse) GetMetaHeader() *session.ResponseMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *DeleteResponse) GetVerifyHeader() *session.ResponseVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// Get container structure +type GetRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of container get request message. 
+ Body *GetRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries request meta information. Header data is used only to regulate + // message transport and does not affect request execution. + MetaHeader *session.RequestMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries request verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. + VerifyHeader *session.RequestVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *GetRequest) Reset() { + *x = GetRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_container_grpc_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetRequest) ProtoMessage() {} + +func (x *GetRequest) ProtoReflect() protoreflect.Message { + mi := &file_container_grpc_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetRequest.ProtoReflect.Descriptor instead. 
+func (*GetRequest) Descriptor() ([]byte, []int) { + return file_container_grpc_service_proto_rawDescGZIP(), []int{4} +} + +func (x *GetRequest) GetBody() *GetRequest_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *GetRequest) GetMetaHeader() *session.RequestMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *GetRequest) GetVerifyHeader() *session.RequestVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// Get container structure +type GetResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of container get response message. + Body *GetResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries response meta information. Header data is used only to regulate + // message transport and does not affect request execution. + MetaHeader *session.ResponseMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries response verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. 
+ VerifyHeader *session.ResponseVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *GetResponse) Reset() { + *x = GetResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_container_grpc_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetResponse) ProtoMessage() {} + +func (x *GetResponse) ProtoReflect() protoreflect.Message { + mi := &file_container_grpc_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetResponse.ProtoReflect.Descriptor instead. +func (*GetResponse) Descriptor() ([]byte, []int) { + return file_container_grpc_service_proto_rawDescGZIP(), []int{5} +} + +func (x *GetResponse) GetBody() *GetResponse_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *GetResponse) GetMetaHeader() *session.ResponseMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *GetResponse) GetVerifyHeader() *session.ResponseVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// List containers +type ListRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of list containers request message + Body *ListRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries request meta information. Header data is used only to regulate + // message transport and does not affect request execution. 
+ MetaHeader *session.RequestMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries request verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. + VerifyHeader *session.RequestVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *ListRequest) Reset() { + *x = ListRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_container_grpc_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListRequest) ProtoMessage() {} + +func (x *ListRequest) ProtoReflect() protoreflect.Message { + mi := &file_container_grpc_service_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListRequest.ProtoReflect.Descriptor instead. +func (*ListRequest) Descriptor() ([]byte, []int) { + return file_container_grpc_service_proto_rawDescGZIP(), []int{6} +} + +func (x *ListRequest) GetBody() *ListRequest_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *ListRequest) GetMetaHeader() *session.RequestMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *ListRequest) GetVerifyHeader() *session.RequestVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// List containers +type ListResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of list containers response message. 
+ Body *ListResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries response meta information. Header data is used only to regulate + // message transport and does not affect request execution. + MetaHeader *session.ResponseMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries response verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. + VerifyHeader *session.ResponseVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *ListResponse) Reset() { + *x = ListResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_container_grpc_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListResponse) ProtoMessage() {} + +func (x *ListResponse) ProtoReflect() protoreflect.Message { + mi := &file_container_grpc_service_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListResponse.ProtoReflect.Descriptor instead. 
+func (*ListResponse) Descriptor() ([]byte, []int) { + return file_container_grpc_service_proto_rawDescGZIP(), []int{7} +} + +func (x *ListResponse) GetBody() *ListResponse_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *ListResponse) GetMetaHeader() *session.ResponseMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *ListResponse) GetVerifyHeader() *session.ResponseVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// Set Extended ACL +type SetExtendedACLRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of set extended acl request message. + Body *SetExtendedACLRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries request meta information. Header data is used only to regulate + // message transport and does not affect request execution. + MetaHeader *session.RequestMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries request verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. 
+ VerifyHeader *session.RequestVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *SetExtendedACLRequest) Reset() { + *x = SetExtendedACLRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_container_grpc_service_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SetExtendedACLRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetExtendedACLRequest) ProtoMessage() {} + +func (x *SetExtendedACLRequest) ProtoReflect() protoreflect.Message { + mi := &file_container_grpc_service_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetExtendedACLRequest.ProtoReflect.Descriptor instead. +func (*SetExtendedACLRequest) Descriptor() ([]byte, []int) { + return file_container_grpc_service_proto_rawDescGZIP(), []int{8} +} + +func (x *SetExtendedACLRequest) GetBody() *SetExtendedACLRequest_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *SetExtendedACLRequest) GetMetaHeader() *session.RequestMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *SetExtendedACLRequest) GetVerifyHeader() *session.RequestVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// Set Extended ACL +type SetExtendedACLResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of set extended acl response message. + Body *SetExtendedACLResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries response meta information. Header data is used only to regulate + // message transport and does not affect request execution. 
+ MetaHeader *session.ResponseMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries response verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. + VerifyHeader *session.ResponseVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *SetExtendedACLResponse) Reset() { + *x = SetExtendedACLResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_container_grpc_service_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SetExtendedACLResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetExtendedACLResponse) ProtoMessage() {} + +func (x *SetExtendedACLResponse) ProtoReflect() protoreflect.Message { + mi := &file_container_grpc_service_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetExtendedACLResponse.ProtoReflect.Descriptor instead. 
+func (*SetExtendedACLResponse) Descriptor() ([]byte, []int) { + return file_container_grpc_service_proto_rawDescGZIP(), []int{9} +} + +func (x *SetExtendedACLResponse) GetBody() *SetExtendedACLResponse_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *SetExtendedACLResponse) GetMetaHeader() *session.ResponseMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *SetExtendedACLResponse) GetVerifyHeader() *session.ResponseVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// Get Extended ACL +type GetExtendedACLRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of get extended acl request message. + Body *GetExtendedACLRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries request meta information. Header data is used only to regulate + // message transport and does not affect request execution. + MetaHeader *session.RequestMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries request verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. 
+ VerifyHeader *session.RequestVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *GetExtendedACLRequest) Reset() { + *x = GetExtendedACLRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_container_grpc_service_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetExtendedACLRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetExtendedACLRequest) ProtoMessage() {} + +func (x *GetExtendedACLRequest) ProtoReflect() protoreflect.Message { + mi := &file_container_grpc_service_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetExtendedACLRequest.ProtoReflect.Descriptor instead. +func (*GetExtendedACLRequest) Descriptor() ([]byte, []int) { + return file_container_grpc_service_proto_rawDescGZIP(), []int{10} +} + +func (x *GetExtendedACLRequest) GetBody() *GetExtendedACLRequest_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *GetExtendedACLRequest) GetMetaHeader() *session.RequestMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *GetExtendedACLRequest) GetVerifyHeader() *session.RequestVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// Get Extended ACL +type GetExtendedACLResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of get extended acl response message. + Body *GetExtendedACLResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries response meta information. Header data is used only to regulate + // message transport and does not affect request execution. 
+ MetaHeader *session.ResponseMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries response verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. + VerifyHeader *session.ResponseVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *GetExtendedACLResponse) Reset() { + *x = GetExtendedACLResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_container_grpc_service_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetExtendedACLResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetExtendedACLResponse) ProtoMessage() {} + +func (x *GetExtendedACLResponse) ProtoReflect() protoreflect.Message { + mi := &file_container_grpc_service_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetExtendedACLResponse.ProtoReflect.Descriptor instead. 
+func (*GetExtendedACLResponse) Descriptor() ([]byte, []int) { + return file_container_grpc_service_proto_rawDescGZIP(), []int{11} +} + +func (x *GetExtendedACLResponse) GetBody() *GetExtendedACLResponse_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *GetExtendedACLResponse) GetMetaHeader() *session.ResponseMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *GetExtendedACLResponse) GetVerifyHeader() *session.ResponseVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// Announce container used space +type AnnounceUsedSpaceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of announce used space request message. + Body *AnnounceUsedSpaceRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries request meta information. Header data is used only to regulate + // message transport and does not affect request execution. + MetaHeader *session.RequestMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries request verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. 
+ VerifyHeader *session.RequestVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *AnnounceUsedSpaceRequest) Reset() { + *x = AnnounceUsedSpaceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_container_grpc_service_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AnnounceUsedSpaceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AnnounceUsedSpaceRequest) ProtoMessage() {} + +func (x *AnnounceUsedSpaceRequest) ProtoReflect() protoreflect.Message { + mi := &file_container_grpc_service_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AnnounceUsedSpaceRequest.ProtoReflect.Descriptor instead. +func (*AnnounceUsedSpaceRequest) Descriptor() ([]byte, []int) { + return file_container_grpc_service_proto_rawDescGZIP(), []int{12} +} + +func (x *AnnounceUsedSpaceRequest) GetBody() *AnnounceUsedSpaceRequest_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *AnnounceUsedSpaceRequest) GetMetaHeader() *session.RequestMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *AnnounceUsedSpaceRequest) GetVerifyHeader() *session.RequestVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// Announce container used space +type AnnounceUsedSpaceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of announce used space response message. + Body *AnnounceUsedSpaceResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries response meta information. 
Header data is used only to regulate + // message transport and does not affect request execution. + MetaHeader *session.ResponseMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries response verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. + VerifyHeader *session.ResponseVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *AnnounceUsedSpaceResponse) Reset() { + *x = AnnounceUsedSpaceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_container_grpc_service_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AnnounceUsedSpaceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AnnounceUsedSpaceResponse) ProtoMessage() {} + +func (x *AnnounceUsedSpaceResponse) ProtoReflect() protoreflect.Message { + mi := &file_container_grpc_service_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AnnounceUsedSpaceResponse.ProtoReflect.Descriptor instead. 
+func (*AnnounceUsedSpaceResponse) Descriptor() ([]byte, []int) { + return file_container_grpc_service_proto_rawDescGZIP(), []int{13} +} + +func (x *AnnounceUsedSpaceResponse) GetBody() *AnnounceUsedSpaceResponse_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *AnnounceUsedSpaceResponse) GetMetaHeader() *session.ResponseMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *AnnounceUsedSpaceResponse) GetVerifyHeader() *session.ResponseVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// Container creation request has container structure's signature as a +// separate field. It's not stored in sidechain, just verified on container +// creation by `Container` smart contract. `ContainerID` is a SHA256 hash of +// the stable-marshalled container strucutre, hence there is no need for +// additional signature checks. +type PutRequest_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Container structure to register in NeoFS + Container *Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container,omitempty"` + // Signature of a stable-marshalled container according to RFC-6979. 
+ Signature *refs.SignatureRFC6979 `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` +} + +func (x *PutRequest_Body) Reset() { + *x = PutRequest_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_container_grpc_service_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PutRequest_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PutRequest_Body) ProtoMessage() {} + +func (x *PutRequest_Body) ProtoReflect() protoreflect.Message { + mi := &file_container_grpc_service_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PutRequest_Body.ProtoReflect.Descriptor instead. +func (*PutRequest_Body) Descriptor() ([]byte, []int) { + return file_container_grpc_service_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *PutRequest_Body) GetContainer() *Container { + if x != nil { + return x.Container + } + return nil +} + +func (x *PutRequest_Body) GetSignature() *refs.SignatureRFC6979 { + if x != nil { + return x.Signature + } + return nil +} + +// Container put response body contains information about the newly registered +// container as seen by `Container` smart contract. `ContainerID` can be +// calculated beforehand from the container structure and compared to the one +// returned here to make sure everything has been done as expected. 
+type PutResponse_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Unique identifier of the newly created container + ContainerId *refs.ContainerID `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` +} + +func (x *PutResponse_Body) Reset() { + *x = PutResponse_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_container_grpc_service_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PutResponse_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PutResponse_Body) ProtoMessage() {} + +func (x *PutResponse_Body) ProtoReflect() protoreflect.Message { + mi := &file_container_grpc_service_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PutResponse_Body.ProtoReflect.Descriptor instead. +func (*PutResponse_Body) Descriptor() ([]byte, []int) { + return file_container_grpc_service_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *PutResponse_Body) GetContainerId() *refs.ContainerID { + if x != nil { + return x.ContainerId + } + return nil +} + +// Container removal request body has signed `ContainerID` as a proof of +// the container owner's intent. The signature will be verified by `Container` +// smart contract, so signing algorithm must be supported by NeoVM. +type DeleteRequest_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifier of the container to delete from NeoFS + ContainerId *refs.ContainerID `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + // `ContainerID` signed with the container owner's key according to RFC-6979. 
+ Signature *refs.SignatureRFC6979 `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` +} + +func (x *DeleteRequest_Body) Reset() { + *x = DeleteRequest_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_container_grpc_service_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteRequest_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteRequest_Body) ProtoMessage() {} + +func (x *DeleteRequest_Body) ProtoReflect() protoreflect.Message { + mi := &file_container_grpc_service_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteRequest_Body.ProtoReflect.Descriptor instead. +func (*DeleteRequest_Body) Descriptor() ([]byte, []int) { + return file_container_grpc_service_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *DeleteRequest_Body) GetContainerId() *refs.ContainerID { + if x != nil { + return x.ContainerId + } + return nil +} + +func (x *DeleteRequest_Body) GetSignature() *refs.SignatureRFC6979 { + if x != nil { + return x.Signature + } + return nil +} + +// `DeleteResponse` has an empty body because delete operation is asynchronous +// and done via consensus in Inner Ring nodes. 
+type DeleteResponse_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *DeleteResponse_Body) Reset() { + *x = DeleteResponse_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_container_grpc_service_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteResponse_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteResponse_Body) ProtoMessage() {} + +func (x *DeleteResponse_Body) ProtoReflect() protoreflect.Message { + mi := &file_container_grpc_service_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteResponse_Body.ProtoReflect.Descriptor instead. +func (*DeleteResponse_Body) Descriptor() ([]byte, []int) { + return file_container_grpc_service_proto_rawDescGZIP(), []int{3, 0} +} + +// Get container structure request body. 
+type GetRequest_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifier of the container to get + ContainerId *refs.ContainerID `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` +} + +func (x *GetRequest_Body) Reset() { + *x = GetRequest_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_container_grpc_service_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetRequest_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetRequest_Body) ProtoMessage() {} + +func (x *GetRequest_Body) ProtoReflect() protoreflect.Message { + mi := &file_container_grpc_service_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetRequest_Body.ProtoReflect.Descriptor instead. +func (*GetRequest_Body) Descriptor() ([]byte, []int) { + return file_container_grpc_service_proto_rawDescGZIP(), []int{4, 0} +} + +func (x *GetRequest_Body) GetContainerId() *refs.ContainerID { + if x != nil { + return x.ContainerId + } + return nil +} + +// Get container response body does not have container structure signature. It +// has been already verified upon container creation. +type GetResponse_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Requested container structure + Container *Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container,omitempty"` + // Signature of a stable-marshalled container according to RFC-6979. 
+ Signature *refs.SignatureRFC6979 `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` + // Session token if the container has been created within the session + SessionToken *session.SessionToken `protobuf:"bytes,3,opt,name=session_token,json=sessionToken,proto3" json:"session_token,omitempty"` +} + +func (x *GetResponse_Body) Reset() { + *x = GetResponse_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_container_grpc_service_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetResponse_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetResponse_Body) ProtoMessage() {} + +func (x *GetResponse_Body) ProtoReflect() protoreflect.Message { + mi := &file_container_grpc_service_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetResponse_Body.ProtoReflect.Descriptor instead. +func (*GetResponse_Body) Descriptor() ([]byte, []int) { + return file_container_grpc_service_proto_rawDescGZIP(), []int{5, 0} +} + +func (x *GetResponse_Body) GetContainer() *Container { + if x != nil { + return x.Container + } + return nil +} + +func (x *GetResponse_Body) GetSignature() *refs.SignatureRFC6979 { + if x != nil { + return x.Signature + } + return nil +} + +func (x *GetResponse_Body) GetSessionToken() *session.SessionToken { + if x != nil { + return x.SessionToken + } + return nil +} + +// List containers request body. 
+type ListRequest_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifier of the container owner + OwnerId *refs.OwnerID `protobuf:"bytes,1,opt,name=owner_id,json=ownerId,proto3" json:"owner_id,omitempty"` +} + +func (x *ListRequest_Body) Reset() { + *x = ListRequest_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_container_grpc_service_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListRequest_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListRequest_Body) ProtoMessage() {} + +func (x *ListRequest_Body) ProtoReflect() protoreflect.Message { + mi := &file_container_grpc_service_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListRequest_Body.ProtoReflect.Descriptor instead. +func (*ListRequest_Body) Descriptor() ([]byte, []int) { + return file_container_grpc_service_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *ListRequest_Body) GetOwnerId() *refs.OwnerID { + if x != nil { + return x.OwnerId + } + return nil +} + +// List containers response body. 
+type ListResponse_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // List of `ContainerID`s belonging to the requested `OwnerID` + ContainerIds []*refs.ContainerID `protobuf:"bytes,1,rep,name=container_ids,json=containerIds,proto3" json:"container_ids,omitempty"` +} + +func (x *ListResponse_Body) Reset() { + *x = ListResponse_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_container_grpc_service_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListResponse_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListResponse_Body) ProtoMessage() {} + +func (x *ListResponse_Body) ProtoReflect() protoreflect.Message { + mi := &file_container_grpc_service_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListResponse_Body.ProtoReflect.Descriptor instead. +func (*ListResponse_Body) Descriptor() ([]byte, []int) { + return file_container_grpc_service_proto_rawDescGZIP(), []int{7, 0} +} + +func (x *ListResponse_Body) GetContainerIds() []*refs.ContainerID { + if x != nil { + return x.ContainerIds + } + return nil +} + +// Set Extended ACL request body does not have separate `ContainerID` +// reference. It will be taken from `EACLTable.container_id` field. +type SetExtendedACLRequest_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Extended ACL table to set for the container + Eacl *acl.EACLTable `protobuf:"bytes,1,opt,name=eacl,proto3" json:"eacl,omitempty"` + // Signature of stable-marshalled Extended ACL table according to RFC-6979. 
+ Signature *refs.SignatureRFC6979 `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` +} + +func (x *SetExtendedACLRequest_Body) Reset() { + *x = SetExtendedACLRequest_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_container_grpc_service_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SetExtendedACLRequest_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetExtendedACLRequest_Body) ProtoMessage() {} + +func (x *SetExtendedACLRequest_Body) ProtoReflect() protoreflect.Message { + mi := &file_container_grpc_service_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetExtendedACLRequest_Body.ProtoReflect.Descriptor instead. +func (*SetExtendedACLRequest_Body) Descriptor() ([]byte, []int) { + return file_container_grpc_service_proto_rawDescGZIP(), []int{8, 0} +} + +func (x *SetExtendedACLRequest_Body) GetEacl() *acl.EACLTable { + if x != nil { + return x.Eacl + } + return nil +} + +func (x *SetExtendedACLRequest_Body) GetSignature() *refs.SignatureRFC6979 { + if x != nil { + return x.Signature + } + return nil +} + +// `SetExtendedACLResponse` has an empty body because the operation is +// asynchronous and the update should be reflected in `Container` smart contract's +// storage after next block is issued in sidechain. 
+type SetExtendedACLResponse_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *SetExtendedACLResponse_Body) Reset() { + *x = SetExtendedACLResponse_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_container_grpc_service_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SetExtendedACLResponse_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetExtendedACLResponse_Body) ProtoMessage() {} + +func (x *SetExtendedACLResponse_Body) ProtoReflect() protoreflect.Message { + mi := &file_container_grpc_service_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetExtendedACLResponse_Body.ProtoReflect.Descriptor instead. +func (*SetExtendedACLResponse_Body) Descriptor() ([]byte, []int) { + return file_container_grpc_service_proto_rawDescGZIP(), []int{9, 0} +} + +// Get Extended ACL request body +type GetExtendedACLRequest_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifier of the container having Extended ACL + ContainerId *refs.ContainerID `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` +} + +func (x *GetExtendedACLRequest_Body) Reset() { + *x = GetExtendedACLRequest_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_container_grpc_service_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetExtendedACLRequest_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetExtendedACLRequest_Body) ProtoMessage() {} + +func (x *GetExtendedACLRequest_Body) ProtoReflect() 
protoreflect.Message { + mi := &file_container_grpc_service_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetExtendedACLRequest_Body.ProtoReflect.Descriptor instead. +func (*GetExtendedACLRequest_Body) Descriptor() ([]byte, []int) { + return file_container_grpc_service_proto_rawDescGZIP(), []int{10, 0} +} + +func (x *GetExtendedACLRequest_Body) GetContainerId() *refs.ContainerID { + if x != nil { + return x.ContainerId + } + return nil +} + +// Get Extended ACL Response body can be empty if the requested container does +// not have Extended ACL Table attached or Extended ACL has not been allowed at +// the time of container creation. +type GetExtendedACLResponse_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Extended ACL requested, if available + Eacl *acl.EACLTable `protobuf:"bytes,1,opt,name=eacl,proto3" json:"eacl,omitempty"` + // Signature of stable-marshalled Extended ACL according to RFC-6979. 
+ Signature *refs.SignatureRFC6979 `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` + // Session token if Extended ACL was set within a session + SessionToken *session.SessionToken `protobuf:"bytes,3,opt,name=session_token,json=sessionToken,proto3" json:"session_token,omitempty"` +} + +func (x *GetExtendedACLResponse_Body) Reset() { + *x = GetExtendedACLResponse_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_container_grpc_service_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetExtendedACLResponse_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetExtendedACLResponse_Body) ProtoMessage() {} + +func (x *GetExtendedACLResponse_Body) ProtoReflect() protoreflect.Message { + mi := &file_container_grpc_service_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetExtendedACLResponse_Body.ProtoReflect.Descriptor instead. +func (*GetExtendedACLResponse_Body) Descriptor() ([]byte, []int) { + return file_container_grpc_service_proto_rawDescGZIP(), []int{11, 0} +} + +func (x *GetExtendedACLResponse_Body) GetEacl() *acl.EACLTable { + if x != nil { + return x.Eacl + } + return nil +} + +func (x *GetExtendedACLResponse_Body) GetSignature() *refs.SignatureRFC6979 { + if x != nil { + return x.Signature + } + return nil +} + +func (x *GetExtendedACLResponse_Body) GetSessionToken() *session.SessionToken { + if x != nil { + return x.SessionToken + } + return nil +} + +// Container used space announcement body. +type AnnounceUsedSpaceRequest_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // List of announcements. 
If nodes share several containers, + // announcements are transferred in a batch. + Announcements []*AnnounceUsedSpaceRequest_Body_Announcement `protobuf:"bytes,1,rep,name=announcements,proto3" json:"announcements,omitempty"` +} + +func (x *AnnounceUsedSpaceRequest_Body) Reset() { + *x = AnnounceUsedSpaceRequest_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_container_grpc_service_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AnnounceUsedSpaceRequest_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AnnounceUsedSpaceRequest_Body) ProtoMessage() {} + +func (x *AnnounceUsedSpaceRequest_Body) ProtoReflect() protoreflect.Message { + mi := &file_container_grpc_service_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AnnounceUsedSpaceRequest_Body.ProtoReflect.Descriptor instead. +func (*AnnounceUsedSpaceRequest_Body) Descriptor() ([]byte, []int) { + return file_container_grpc_service_proto_rawDescGZIP(), []int{12, 0} +} + +func (x *AnnounceUsedSpaceRequest_Body) GetAnnouncements() []*AnnounceUsedSpaceRequest_Body_Announcement { + if x != nil { + return x.Announcements + } + return nil +} + +// Announcement contains used space information for a single container. +type AnnounceUsedSpaceRequest_Body_Announcement struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Epoch number for which the container size estimation was produced. + Epoch uint64 `protobuf:"varint,1,opt,name=epoch,proto3" json:"epoch,omitempty"` + // Identifier of the container. 
+ ContainerId *refs.ContainerID `protobuf:"bytes,2,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + // Used space is a sum of object payload sizes of a specified + // container, stored in the node. It must not include inhumed objects. + UsedSpace uint64 `protobuf:"varint,3,opt,name=used_space,json=usedSpace,proto3" json:"used_space,omitempty"` +} + +func (x *AnnounceUsedSpaceRequest_Body_Announcement) Reset() { + *x = AnnounceUsedSpaceRequest_Body_Announcement{} + if protoimpl.UnsafeEnabled { + mi := &file_container_grpc_service_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AnnounceUsedSpaceRequest_Body_Announcement) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AnnounceUsedSpaceRequest_Body_Announcement) ProtoMessage() {} + +func (x *AnnounceUsedSpaceRequest_Body_Announcement) ProtoReflect() protoreflect.Message { + mi := &file_container_grpc_service_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AnnounceUsedSpaceRequest_Body_Announcement.ProtoReflect.Descriptor instead. +func (*AnnounceUsedSpaceRequest_Body_Announcement) Descriptor() ([]byte, []int) { + return file_container_grpc_service_proto_rawDescGZIP(), []int{12, 0, 0} +} + +func (x *AnnounceUsedSpaceRequest_Body_Announcement) GetEpoch() uint64 { + if x != nil { + return x.Epoch + } + return 0 +} + +func (x *AnnounceUsedSpaceRequest_Body_Announcement) GetContainerId() *refs.ContainerID { + if x != nil { + return x.ContainerId + } + return nil +} + +func (x *AnnounceUsedSpaceRequest_Body_Announcement) GetUsedSpace() uint64 { + if x != nil { + return x.UsedSpace + } + return 0 +} + +// `AnnounceUsedSpaceResponse` has an empty body because announcements are +// one way communication. 
+type AnnounceUsedSpaceResponse_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *AnnounceUsedSpaceResponse_Body) Reset() { + *x = AnnounceUsedSpaceResponse_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_container_grpc_service_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AnnounceUsedSpaceResponse_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AnnounceUsedSpaceResponse_Body) ProtoMessage() {} + +func (x *AnnounceUsedSpaceResponse_Body) ProtoReflect() protoreflect.Message { + mi := &file_container_grpc_service_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AnnounceUsedSpaceResponse_Body.ProtoReflect.Descriptor instead. 
+func (*AnnounceUsedSpaceResponse_Body) Descriptor() ([]byte, []int) { + return file_container_grpc_service_proto_rawDescGZIP(), []int{13, 0} +} + +var File_container_grpc_service_proto protoreflect.FileDescriptor + +var file_container_grpc_service_proto_rawDesc = []byte{ + 0x0a, 0x1c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2f, 0x67, 0x72, 0x70, 0x63, + 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, + 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x1a, 0x14, 0x61, 0x63, 0x6c, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1a, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x72, 0x65, 0x66, 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x73, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe7, 0x02, 0x0a, 0x0a, 0x50, 0x75, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x50, 0x75, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, + 0x45, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 
0x72, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x51, 0x0a, 0x0d, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, + 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, + 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x84, 0x01, 0x0a, 0x04, 0x42, 0x6f, + 0x64, 0x79, 0x12, 0x3c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x12, 0x3e, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, + 0x72, 0x65, 0x66, 0x73, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x46, + 0x43, 0x36, 0x39, 0x37, 0x39, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x22, 0xac, 0x02, 0x0a, 0x0b, 0x50, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x39, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x50, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x46, 0x0a, 0x0b, 0x6d, + 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 
0x0b, + 0x32, 0x25, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x74, + 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x12, 0x52, 0x0a, 0x0d, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6e, 0x65, 0x6f, + 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, + 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x46, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, + 0x3e, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x49, 0x44, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x22, + 0xef, 0x02, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x3b, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x27, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x45, + 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, + 0x73, 0x65, 0x73, 0x73, 0x69, 
0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, + 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x51, 0x0a, 0x0d, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6e, + 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, + 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x86, 0x01, 0x0a, 0x04, 0x42, 0x6f, 0x64, + 0x79, 0x12, 0x3e, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, + 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x49, 0x44, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, + 0x64, 0x12, 0x3e, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, + 0x46, 0x43, 0x36, 0x39, 0x37, 0x39, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x22, 0xf2, 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 
0x79, 0x52, 0x04, 0x62, 0x6f, + 0x64, 0x79, 0x12, 0x46, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, + 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0a, + 0x6d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x52, 0x0a, 0x0d, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x06, + 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xa8, 0x02, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, + 0x45, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x51, 0x0a, 0x0d, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, + 0x5f, 
0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, + 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, + 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x46, 0x0a, 0x04, 0x42, 0x6f, 0x64, + 0x79, 0x12, 0x3e, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, + 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x49, 0x44, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, + 0x64, 0x22, 0xb1, 0x03, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x39, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x25, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x46, 0x0a, 0x0b, + 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x25, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, + 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x12, 0x52, 0x0a, 0x0d, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6e, 0x65, + 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 
0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, + 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0xca, 0x01, 0x0a, 0x04, 0x42, 0x6f, 0x64, + 0x79, 0x12, 0x3c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, + 0x3e, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, + 0x65, 0x66, 0x73, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x46, 0x43, + 0x36, 0x39, 0x37, 0x39, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, + 0x44, 0x0a, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, + 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x0c, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x9e, 0x02, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 
0x6f, 0x64, 0x79, + 0x12, 0x45, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x6d, 0x65, 0x74, + 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x51, 0x0a, 0x0d, 0x76, 0x65, 0x72, 0x69, 0x66, + 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, + 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x3a, 0x0a, 0x04, 0x42, 0x6f, + 0x64, 0x79, 0x12, 0x32, 0x0a, 0x08, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x52, 0x07, 0x6f, + 0x77, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x22, 0xb0, 0x02, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, + 0x6f, 0x64, 0x79, 0x12, 0x46, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, + 0x73, 0x2e, 0x76, 
0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, + 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x52, 0x0a, 0x0d, 0x76, + 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, + 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, + 0x48, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x40, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, + 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x52, 0x0c, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0xec, 0x02, 0x0a, 0x15, 0x53, 0x65, + 0x74, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x41, 0x43, 0x4c, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x43, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2f, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x64, 0x65, 0x64, 0x41, 0x43, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, + 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 
0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, + 0x51, 0x0a, 0x0d, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, + 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x1a, 0x74, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x2c, 0x0a, 0x04, 0x65, 0x61, + 0x63, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, + 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x45, 0x41, 0x43, 0x4c, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x52, 0x04, 0x65, 0x61, 0x63, 0x6c, 0x12, 0x3e, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6e, 0x65, + 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x53, 0x69, 0x67, + 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x46, 0x43, 0x36, 0x39, 0x37, 0x39, 0x52, 0x09, 0x73, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x82, 0x02, 0x0a, 0x16, 0x53, 0x65, 0x74, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x41, 0x43, 0x4c, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x30, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x64, 0x65, 0x64, 0x41, 0x43, 0x4c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 
0x42, + 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x46, 0x0a, 0x0b, 0x6d, 0x65, 0x74, + 0x61, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x12, 0x52, 0x0a, 0x0d, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, + 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xbe, 0x02, + 0x0a, 0x15, 0x47, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x41, 0x43, 0x4c, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x43, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x41, 0x43, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, 0x0b, + 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, + 0x61, 0x48, 0x65, 0x61, 0x64, 
0x65, 0x72, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x12, 0x51, 0x0a, 0x0d, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6e, 0x65, 0x6f, + 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x46, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x3e, + 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, + 0x44, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x22, 0xb7, + 0x03, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x41, 0x43, + 0x4c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x04, 0x62, 0x6f, 0x64, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, + 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x47, 0x65, + 0x74, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x41, 0x43, 0x4c, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, + 0x46, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 
0x52, 0x0a, 0x6d, 0x65, 0x74, + 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x52, 0x0a, 0x0d, 0x76, 0x65, 0x72, 0x69, 0x66, + 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, + 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, + 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0xba, 0x01, 0x0a, 0x04, + 0x42, 0x6f, 0x64, 0x79, 0x12, 0x2c, 0x0a, 0x04, 0x65, 0x61, 0x63, 0x6c, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x61, + 0x63, 0x6c, 0x2e, 0x45, 0x41, 0x43, 0x4c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x04, 0x65, 0x61, + 0x63, 0x6c, 0x12, 0x3e, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x52, 0x46, 0x43, 0x36, 0x39, 0x37, 0x39, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x12, 0x44, 0x0a, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, + 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x0c, 0x73, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xf2, 0x03, 0x0a, 0x18, 0x41, 0x6e, 0x6e, + 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x55, 0x73, 0x65, 0x64, 0x53, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x46, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, + 0x01, 
0x28, 0x0b, 0x32, 0x32, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x75, 0x6e, + 0x63, 0x65, 0x55, 0x73, 0x65, 0x64, 0x53, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, + 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, + 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x12, 0x51, 0x0a, 0x0d, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6e, 0x65, + 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, + 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0xf3, 0x01, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, + 0x12, 0x65, 0x0a, 0x0d, 0x61, 0x6e, 0x6e, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, + 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x41, 0x6e, + 0x6e, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x55, 0x73, 0x65, 0x64, 0x53, 0x70, 0x61, 0x63, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, + 0x75, 0x6e, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x0d, 0x61, 0x6e, 0x6e, 0x6f, 0x75, 0x6e, + 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 
0x1a, 0x83, 0x01, 0x0a, 0x0c, 0x41, 0x6e, 0x6e, 0x6f, + 0x75, 0x6e, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, + 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x3e, + 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, + 0x44, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1d, + 0x0a, 0x0a, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x09, 0x75, 0x73, 0x65, 0x64, 0x53, 0x70, 0x61, 0x63, 0x65, 0x22, 0x88, 0x02, + 0x0a, 0x19, 0x41, 0x6e, 0x6e, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x55, 0x73, 0x65, 0x64, 0x53, 0x70, + 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x04, 0x62, + 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, + 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, + 0x41, 0x6e, 0x6e, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x55, 0x73, 0x65, 0x64, 0x53, 0x70, 0x61, 0x63, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, + 0x62, 0x6f, 0x64, 0x79, 0x12, 0x46, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, + 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x52, 0x0a, 0x0d, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 
0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, + 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x32, 0x90, 0x05, 0x0a, 0x10, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x48, 0x0a, + 0x03, 0x50, 0x75, 0x74, 0x12, 0x1f, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x50, 0x75, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x50, 0x75, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x12, 0x22, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x03, 0x47, 0x65, + 0x74, 0x12, 0x1f, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 
0x65, 0x12, 0x4b, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x20, 0x2e, 0x6e, + 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, + 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x69, 0x0a, 0x0e, 0x53, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, + 0x41, 0x43, 0x4c, 0x12, 0x2a, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x74, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x64, 0x65, 0x64, 0x41, 0x43, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2b, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, + 0x64, 0x41, 0x43, 0x4c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x69, 0x0a, 0x0e, + 0x47, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x41, 0x43, 0x4c, 0x12, 0x2a, + 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, + 0x41, 0x43, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x6e, 0x65, 0x6f, + 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x2e, 0x47, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x41, 0x43, 0x4c, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x72, 0x0a, 0x11, 0x41, 0x6e, 0x6e, 0x6f, 0x75, + 0x6e, 0x63, 0x65, 0x55, 0x73, 0x65, 0x64, 0x53, 0x70, 0x61, 0x63, 0x65, 0x12, 0x2d, 0x2e, 0x6e, + 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 
0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x55, 0x73, 0x65, 0x64, 0x53, + 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x6e, 0x65, + 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x55, 0x73, 0x65, 0x64, 0x53, 0x70, + 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x5f, 0x5a, 0x3d, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6e, 0x73, 0x70, 0x63, 0x63, 0x2d, + 0x64, 0x65, 0x76, 0x2f, 0x6e, 0x65, 0x6f, 0x66, 0x73, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x67, 0x6f, + 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2f, 0x67, 0x72, + 0x70, 0x63, 0x3b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0xaa, 0x02, 0x1d, 0x4e, + 0x65, 0x6f, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x41, + 0x50, 0x49, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_container_grpc_service_proto_rawDescOnce sync.Once + file_container_grpc_service_proto_rawDescData = file_container_grpc_service_proto_rawDesc +) + +func file_container_grpc_service_proto_rawDescGZIP() []byte { + file_container_grpc_service_proto_rawDescOnce.Do(func() { + file_container_grpc_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_container_grpc_service_proto_rawDescData) + }) + return file_container_grpc_service_proto_rawDescData +} + +var file_container_grpc_service_proto_msgTypes = make([]protoimpl.MessageInfo, 29) +var file_container_grpc_service_proto_goTypes = []interface{}{ + (*PutRequest)(nil), // 0: neo.fs.v2.container.PutRequest + (*PutResponse)(nil), // 1: neo.fs.v2.container.PutResponse + (*DeleteRequest)(nil), // 2: neo.fs.v2.container.DeleteRequest + (*DeleteResponse)(nil), // 3: 
neo.fs.v2.container.DeleteResponse + (*GetRequest)(nil), // 4: neo.fs.v2.container.GetRequest + (*GetResponse)(nil), // 5: neo.fs.v2.container.GetResponse + (*ListRequest)(nil), // 6: neo.fs.v2.container.ListRequest + (*ListResponse)(nil), // 7: neo.fs.v2.container.ListResponse + (*SetExtendedACLRequest)(nil), // 8: neo.fs.v2.container.SetExtendedACLRequest + (*SetExtendedACLResponse)(nil), // 9: neo.fs.v2.container.SetExtendedACLResponse + (*GetExtendedACLRequest)(nil), // 10: neo.fs.v2.container.GetExtendedACLRequest + (*GetExtendedACLResponse)(nil), // 11: neo.fs.v2.container.GetExtendedACLResponse + (*AnnounceUsedSpaceRequest)(nil), // 12: neo.fs.v2.container.AnnounceUsedSpaceRequest + (*AnnounceUsedSpaceResponse)(nil), // 13: neo.fs.v2.container.AnnounceUsedSpaceResponse + (*PutRequest_Body)(nil), // 14: neo.fs.v2.container.PutRequest.Body + (*PutResponse_Body)(nil), // 15: neo.fs.v2.container.PutResponse.Body + (*DeleteRequest_Body)(nil), // 16: neo.fs.v2.container.DeleteRequest.Body + (*DeleteResponse_Body)(nil), // 17: neo.fs.v2.container.DeleteResponse.Body + (*GetRequest_Body)(nil), // 18: neo.fs.v2.container.GetRequest.Body + (*GetResponse_Body)(nil), // 19: neo.fs.v2.container.GetResponse.Body + (*ListRequest_Body)(nil), // 20: neo.fs.v2.container.ListRequest.Body + (*ListResponse_Body)(nil), // 21: neo.fs.v2.container.ListResponse.Body + (*SetExtendedACLRequest_Body)(nil), // 22: neo.fs.v2.container.SetExtendedACLRequest.Body + (*SetExtendedACLResponse_Body)(nil), // 23: neo.fs.v2.container.SetExtendedACLResponse.Body + (*GetExtendedACLRequest_Body)(nil), // 24: neo.fs.v2.container.GetExtendedACLRequest.Body + (*GetExtendedACLResponse_Body)(nil), // 25: neo.fs.v2.container.GetExtendedACLResponse.Body + (*AnnounceUsedSpaceRequest_Body)(nil), // 26: neo.fs.v2.container.AnnounceUsedSpaceRequest.Body + (*AnnounceUsedSpaceRequest_Body_Announcement)(nil), // 27: neo.fs.v2.container.AnnounceUsedSpaceRequest.Body.Announcement + 
(*AnnounceUsedSpaceResponse_Body)(nil), // 28: neo.fs.v2.container.AnnounceUsedSpaceResponse.Body + (*session.RequestMetaHeader)(nil), // 29: neo.fs.v2.session.RequestMetaHeader + (*session.RequestVerificationHeader)(nil), // 30: neo.fs.v2.session.RequestVerificationHeader + (*session.ResponseMetaHeader)(nil), // 31: neo.fs.v2.session.ResponseMetaHeader + (*session.ResponseVerificationHeader)(nil), // 32: neo.fs.v2.session.ResponseVerificationHeader + (*Container)(nil), // 33: neo.fs.v2.container.Container + (*refs.SignatureRFC6979)(nil), // 34: neo.fs.v2.refs.SignatureRFC6979 + (*refs.ContainerID)(nil), // 35: neo.fs.v2.refs.ContainerID + (*session.SessionToken)(nil), // 36: neo.fs.v2.session.SessionToken + (*refs.OwnerID)(nil), // 37: neo.fs.v2.refs.OwnerID + (*acl.EACLTable)(nil), // 38: neo.fs.v2.acl.EACLTable +} +var file_container_grpc_service_proto_depIdxs = []int32{ + 14, // 0: neo.fs.v2.container.PutRequest.body:type_name -> neo.fs.v2.container.PutRequest.Body + 29, // 1: neo.fs.v2.container.PutRequest.meta_header:type_name -> neo.fs.v2.session.RequestMetaHeader + 30, // 2: neo.fs.v2.container.PutRequest.verify_header:type_name -> neo.fs.v2.session.RequestVerificationHeader + 15, // 3: neo.fs.v2.container.PutResponse.body:type_name -> neo.fs.v2.container.PutResponse.Body + 31, // 4: neo.fs.v2.container.PutResponse.meta_header:type_name -> neo.fs.v2.session.ResponseMetaHeader + 32, // 5: neo.fs.v2.container.PutResponse.verify_header:type_name -> neo.fs.v2.session.ResponseVerificationHeader + 16, // 6: neo.fs.v2.container.DeleteRequest.body:type_name -> neo.fs.v2.container.DeleteRequest.Body + 29, // 7: neo.fs.v2.container.DeleteRequest.meta_header:type_name -> neo.fs.v2.session.RequestMetaHeader + 30, // 8: neo.fs.v2.container.DeleteRequest.verify_header:type_name -> neo.fs.v2.session.RequestVerificationHeader + 17, // 9: neo.fs.v2.container.DeleteResponse.body:type_name -> neo.fs.v2.container.DeleteResponse.Body + 31, // 10: 
neo.fs.v2.container.DeleteResponse.meta_header:type_name -> neo.fs.v2.session.ResponseMetaHeader + 32, // 11: neo.fs.v2.container.DeleteResponse.verify_header:type_name -> neo.fs.v2.session.ResponseVerificationHeader + 18, // 12: neo.fs.v2.container.GetRequest.body:type_name -> neo.fs.v2.container.GetRequest.Body + 29, // 13: neo.fs.v2.container.GetRequest.meta_header:type_name -> neo.fs.v2.session.RequestMetaHeader + 30, // 14: neo.fs.v2.container.GetRequest.verify_header:type_name -> neo.fs.v2.session.RequestVerificationHeader + 19, // 15: neo.fs.v2.container.GetResponse.body:type_name -> neo.fs.v2.container.GetResponse.Body + 31, // 16: neo.fs.v2.container.GetResponse.meta_header:type_name -> neo.fs.v2.session.ResponseMetaHeader + 32, // 17: neo.fs.v2.container.GetResponse.verify_header:type_name -> neo.fs.v2.session.ResponseVerificationHeader + 20, // 18: neo.fs.v2.container.ListRequest.body:type_name -> neo.fs.v2.container.ListRequest.Body + 29, // 19: neo.fs.v2.container.ListRequest.meta_header:type_name -> neo.fs.v2.session.RequestMetaHeader + 30, // 20: neo.fs.v2.container.ListRequest.verify_header:type_name -> neo.fs.v2.session.RequestVerificationHeader + 21, // 21: neo.fs.v2.container.ListResponse.body:type_name -> neo.fs.v2.container.ListResponse.Body + 31, // 22: neo.fs.v2.container.ListResponse.meta_header:type_name -> neo.fs.v2.session.ResponseMetaHeader + 32, // 23: neo.fs.v2.container.ListResponse.verify_header:type_name -> neo.fs.v2.session.ResponseVerificationHeader + 22, // 24: neo.fs.v2.container.SetExtendedACLRequest.body:type_name -> neo.fs.v2.container.SetExtendedACLRequest.Body + 29, // 25: neo.fs.v2.container.SetExtendedACLRequest.meta_header:type_name -> neo.fs.v2.session.RequestMetaHeader + 30, // 26: neo.fs.v2.container.SetExtendedACLRequest.verify_header:type_name -> neo.fs.v2.session.RequestVerificationHeader + 23, // 27: neo.fs.v2.container.SetExtendedACLResponse.body:type_name -> neo.fs.v2.container.SetExtendedACLResponse.Body + 31, 
// 28: neo.fs.v2.container.SetExtendedACLResponse.meta_header:type_name -> neo.fs.v2.session.ResponseMetaHeader + 32, // 29: neo.fs.v2.container.SetExtendedACLResponse.verify_header:type_name -> neo.fs.v2.session.ResponseVerificationHeader + 24, // 30: neo.fs.v2.container.GetExtendedACLRequest.body:type_name -> neo.fs.v2.container.GetExtendedACLRequest.Body + 29, // 31: neo.fs.v2.container.GetExtendedACLRequest.meta_header:type_name -> neo.fs.v2.session.RequestMetaHeader + 30, // 32: neo.fs.v2.container.GetExtendedACLRequest.verify_header:type_name -> neo.fs.v2.session.RequestVerificationHeader + 25, // 33: neo.fs.v2.container.GetExtendedACLResponse.body:type_name -> neo.fs.v2.container.GetExtendedACLResponse.Body + 31, // 34: neo.fs.v2.container.GetExtendedACLResponse.meta_header:type_name -> neo.fs.v2.session.ResponseMetaHeader + 32, // 35: neo.fs.v2.container.GetExtendedACLResponse.verify_header:type_name -> neo.fs.v2.session.ResponseVerificationHeader + 26, // 36: neo.fs.v2.container.AnnounceUsedSpaceRequest.body:type_name -> neo.fs.v2.container.AnnounceUsedSpaceRequest.Body + 29, // 37: neo.fs.v2.container.AnnounceUsedSpaceRequest.meta_header:type_name -> neo.fs.v2.session.RequestMetaHeader + 30, // 38: neo.fs.v2.container.AnnounceUsedSpaceRequest.verify_header:type_name -> neo.fs.v2.session.RequestVerificationHeader + 28, // 39: neo.fs.v2.container.AnnounceUsedSpaceResponse.body:type_name -> neo.fs.v2.container.AnnounceUsedSpaceResponse.Body + 31, // 40: neo.fs.v2.container.AnnounceUsedSpaceResponse.meta_header:type_name -> neo.fs.v2.session.ResponseMetaHeader + 32, // 41: neo.fs.v2.container.AnnounceUsedSpaceResponse.verify_header:type_name -> neo.fs.v2.session.ResponseVerificationHeader + 33, // 42: neo.fs.v2.container.PutRequest.Body.container:type_name -> neo.fs.v2.container.Container + 34, // 43: neo.fs.v2.container.PutRequest.Body.signature:type_name -> neo.fs.v2.refs.SignatureRFC6979 + 35, // 44: 
neo.fs.v2.container.PutResponse.Body.container_id:type_name -> neo.fs.v2.refs.ContainerID + 35, // 45: neo.fs.v2.container.DeleteRequest.Body.container_id:type_name -> neo.fs.v2.refs.ContainerID + 34, // 46: neo.fs.v2.container.DeleteRequest.Body.signature:type_name -> neo.fs.v2.refs.SignatureRFC6979 + 35, // 47: neo.fs.v2.container.GetRequest.Body.container_id:type_name -> neo.fs.v2.refs.ContainerID + 33, // 48: neo.fs.v2.container.GetResponse.Body.container:type_name -> neo.fs.v2.container.Container + 34, // 49: neo.fs.v2.container.GetResponse.Body.signature:type_name -> neo.fs.v2.refs.SignatureRFC6979 + 36, // 50: neo.fs.v2.container.GetResponse.Body.session_token:type_name -> neo.fs.v2.session.SessionToken + 37, // 51: neo.fs.v2.container.ListRequest.Body.owner_id:type_name -> neo.fs.v2.refs.OwnerID + 35, // 52: neo.fs.v2.container.ListResponse.Body.container_ids:type_name -> neo.fs.v2.refs.ContainerID + 38, // 53: neo.fs.v2.container.SetExtendedACLRequest.Body.eacl:type_name -> neo.fs.v2.acl.EACLTable + 34, // 54: neo.fs.v2.container.SetExtendedACLRequest.Body.signature:type_name -> neo.fs.v2.refs.SignatureRFC6979 + 35, // 55: neo.fs.v2.container.GetExtendedACLRequest.Body.container_id:type_name -> neo.fs.v2.refs.ContainerID + 38, // 56: neo.fs.v2.container.GetExtendedACLResponse.Body.eacl:type_name -> neo.fs.v2.acl.EACLTable + 34, // 57: neo.fs.v2.container.GetExtendedACLResponse.Body.signature:type_name -> neo.fs.v2.refs.SignatureRFC6979 + 36, // 58: neo.fs.v2.container.GetExtendedACLResponse.Body.session_token:type_name -> neo.fs.v2.session.SessionToken + 27, // 59: neo.fs.v2.container.AnnounceUsedSpaceRequest.Body.announcements:type_name -> neo.fs.v2.container.AnnounceUsedSpaceRequest.Body.Announcement + 35, // 60: neo.fs.v2.container.AnnounceUsedSpaceRequest.Body.Announcement.container_id:type_name -> neo.fs.v2.refs.ContainerID + 0, // 61: neo.fs.v2.container.ContainerService.Put:input_type -> neo.fs.v2.container.PutRequest + 2, // 62: 
neo.fs.v2.container.ContainerService.Delete:input_type -> neo.fs.v2.container.DeleteRequest + 4, // 63: neo.fs.v2.container.ContainerService.Get:input_type -> neo.fs.v2.container.GetRequest + 6, // 64: neo.fs.v2.container.ContainerService.List:input_type -> neo.fs.v2.container.ListRequest + 8, // 65: neo.fs.v2.container.ContainerService.SetExtendedACL:input_type -> neo.fs.v2.container.SetExtendedACLRequest + 10, // 66: neo.fs.v2.container.ContainerService.GetExtendedACL:input_type -> neo.fs.v2.container.GetExtendedACLRequest + 12, // 67: neo.fs.v2.container.ContainerService.AnnounceUsedSpace:input_type -> neo.fs.v2.container.AnnounceUsedSpaceRequest + 1, // 68: neo.fs.v2.container.ContainerService.Put:output_type -> neo.fs.v2.container.PutResponse + 3, // 69: neo.fs.v2.container.ContainerService.Delete:output_type -> neo.fs.v2.container.DeleteResponse + 5, // 70: neo.fs.v2.container.ContainerService.Get:output_type -> neo.fs.v2.container.GetResponse + 7, // 71: neo.fs.v2.container.ContainerService.List:output_type -> neo.fs.v2.container.ListResponse + 9, // 72: neo.fs.v2.container.ContainerService.SetExtendedACL:output_type -> neo.fs.v2.container.SetExtendedACLResponse + 11, // 73: neo.fs.v2.container.ContainerService.GetExtendedACL:output_type -> neo.fs.v2.container.GetExtendedACLResponse + 13, // 74: neo.fs.v2.container.ContainerService.AnnounceUsedSpace:output_type -> neo.fs.v2.container.AnnounceUsedSpaceResponse + 68, // [68:75] is the sub-list for method output_type + 61, // [61:68] is the sub-list for method input_type + 61, // [61:61] is the sub-list for extension type_name + 61, // [61:61] is the sub-list for extension extendee + 0, // [0:61] is the sub-list for field type_name +} + +func init() { file_container_grpc_service_proto_init() } +func file_container_grpc_service_proto_init() { + if File_container_grpc_service_proto != nil { + return + } + file_container_grpc_types_proto_init() + if !protoimpl.UnsafeEnabled { + 
file_container_grpc_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PutRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_container_grpc_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PutResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_container_grpc_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_container_grpc_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_container_grpc_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_container_grpc_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_container_grpc_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_container_grpc_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListResponse); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_container_grpc_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetExtendedACLRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_container_grpc_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetExtendedACLResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_container_grpc_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetExtendedACLRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_container_grpc_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetExtendedACLResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_container_grpc_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AnnounceUsedSpaceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_container_grpc_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AnnounceUsedSpaceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_container_grpc_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PutRequest_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_container_grpc_service_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PutResponse_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_container_grpc_service_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteRequest_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_container_grpc_service_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteResponse_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_container_grpc_service_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetRequest_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_container_grpc_service_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetResponse_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_container_grpc_service_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListRequest_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_container_grpc_service_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListResponse_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_container_grpc_service_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetExtendedACLRequest_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_container_grpc_service_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetExtendedACLResponse_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_container_grpc_service_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetExtendedACLRequest_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_container_grpc_service_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetExtendedACLResponse_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_container_grpc_service_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AnnounceUsedSpaceRequest_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_container_grpc_service_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AnnounceUsedSpaceRequest_Body_Announcement); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_container_grpc_service_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AnnounceUsedSpaceResponse_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x 
struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_container_grpc_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 29, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_container_grpc_service_proto_goTypes, + DependencyIndexes: file_container_grpc_service_proto_depIdxs, + MessageInfos: file_container_grpc_service_proto_msgTypes, + }.Build() + File_container_grpc_service_proto = out.File + file_container_grpc_service_proto_rawDesc = nil + file_container_grpc_service_proto_goTypes = nil + file_container_grpc_service_proto_depIdxs = nil +} diff --git a/api/container/service_grpc.pb.go b/api/container/service_grpc.pb.go new file mode 100644 index 000000000..47cafc501 --- /dev/null +++ b/api/container/service_grpc.pb.go @@ -0,0 +1,449 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.25.1 +// source: container/grpc/service.proto + +package container + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. 
+const _ = grpc.SupportPackageIsVersion7 + +const ( + ContainerService_Put_FullMethodName = "/neo.fs.v2.container.ContainerService/Put" + ContainerService_Delete_FullMethodName = "/neo.fs.v2.container.ContainerService/Delete" + ContainerService_Get_FullMethodName = "/neo.fs.v2.container.ContainerService/Get" + ContainerService_List_FullMethodName = "/neo.fs.v2.container.ContainerService/List" + ContainerService_SetExtendedACL_FullMethodName = "/neo.fs.v2.container.ContainerService/SetExtendedACL" + ContainerService_GetExtendedACL_FullMethodName = "/neo.fs.v2.container.ContainerService/GetExtendedACL" + ContainerService_AnnounceUsedSpace_FullMethodName = "/neo.fs.v2.container.ContainerService/AnnounceUsedSpace" +) + +// ContainerServiceClient is the client API for ContainerService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ContainerServiceClient interface { + // `Put` invokes `Container` smart contract's `Put` method and returns + // response immediately. After a new block is issued in sidechain, request is + // verified by Inner Ring nodes. After one more block in sidechain, the container + // is added into smart contract storage. + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): \ + // request to save the container has been sent to the sidechain; + // - Common failures (SECTION_FAILURE_COMMON). + Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) + // `Delete` invokes `Container` smart contract's `Delete` method and returns + // response immediately. After a new block is issued in sidechain, request is + // verified by Inner Ring nodes. After one more block in sidechain, the container + // is added into smart contract storage. + // NOTE: a container deletion leads to the removal of every object in that + // container, regardless of any restrictions on the object removal (e.g. 
lock/locked + // object would be also removed). + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): \ + // request to remove the container has been sent to the sidechain; + // - Common failures (SECTION_FAILURE_COMMON). + Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*DeleteResponse, error) + // Returns container structure from `Container` smart contract storage. + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): \ + // container has been successfully read; + // - Common failures (SECTION_FAILURE_COMMON); + // - **CONTAINER_NOT_FOUND** (3072, SECTION_CONTAINER): \ + // requested container not found. + Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) + // Returns all owner's containers from 'Container` smart contract' storage. + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): \ + // container list has been successfully read; + // - Common failures (SECTION_FAILURE_COMMON). + List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (*ListResponse, error) + // Invokes 'SetEACL' method of 'Container` smart contract and returns response + // immediately. After one more block in sidechain, changes in an Extended ACL are + // added into smart contract storage. + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): \ + // request to save container eACL has been sent to the sidechain; + // - Common failures (SECTION_FAILURE_COMMON). + SetExtendedACL(ctx context.Context, in *SetExtendedACLRequest, opts ...grpc.CallOption) (*SetExtendedACLResponse, error) + // Returns Extended ACL table and signature from `Container` smart contract + // storage. + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): \ + // container eACL has been successfully read; + // - Common failures (SECTION_FAILURE_COMMON); + // - **CONTAINER_NOT_FOUND** (3072, SECTION_CONTAINER): \ + // container not found; + // - **EACL_NOT_FOUND** (3073, SECTION_CONTAINER): \ + // eACL table not found. 
+ GetExtendedACL(ctx context.Context, in *GetExtendedACLRequest, opts ...grpc.CallOption) (*GetExtendedACLResponse, error) + // Announces the space values used by the container for P2P synchronization. + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): \ + // estimation of used space has been successfully announced; + // - Common failures (SECTION_FAILURE_COMMON). + AnnounceUsedSpace(ctx context.Context, in *AnnounceUsedSpaceRequest, opts ...grpc.CallOption) (*AnnounceUsedSpaceResponse, error) +} + +type containerServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewContainerServiceClient(cc grpc.ClientConnInterface) ContainerServiceClient { + return &containerServiceClient{cc} +} + +func (c *containerServiceClient) Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) { + out := new(PutResponse) + err := c.cc.Invoke(ctx, ContainerService_Put_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *containerServiceClient) Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*DeleteResponse, error) { + out := new(DeleteResponse) + err := c.cc.Invoke(ctx, ContainerService_Delete_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *containerServiceClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) { + out := new(GetResponse) + err := c.cc.Invoke(ctx, ContainerService_Get_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *containerServiceClient) List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (*ListResponse, error) { + out := new(ListResponse) + err := c.cc.Invoke(ctx, ContainerService_List_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *containerServiceClient) SetExtendedACL(ctx context.Context, in *SetExtendedACLRequest, opts ...grpc.CallOption) (*SetExtendedACLResponse, error) { + out := new(SetExtendedACLResponse) + err := c.cc.Invoke(ctx, ContainerService_SetExtendedACL_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *containerServiceClient) GetExtendedACL(ctx context.Context, in *GetExtendedACLRequest, opts ...grpc.CallOption) (*GetExtendedACLResponse, error) { + out := new(GetExtendedACLResponse) + err := c.cc.Invoke(ctx, ContainerService_GetExtendedACL_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *containerServiceClient) AnnounceUsedSpace(ctx context.Context, in *AnnounceUsedSpaceRequest, opts ...grpc.CallOption) (*AnnounceUsedSpaceResponse, error) { + out := new(AnnounceUsedSpaceResponse) + err := c.cc.Invoke(ctx, ContainerService_AnnounceUsedSpace_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ContainerServiceServer is the server API for ContainerService service. +// All implementations should embed UnimplementedContainerServiceServer +// for forward compatibility +type ContainerServiceServer interface { + // `Put` invokes `Container` smart contract's `Put` method and returns + // response immediately. After a new block is issued in sidechain, request is + // verified by Inner Ring nodes. After one more block in sidechain, the container + // is added into smart contract storage. + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): \ + // request to save the container has been sent to the sidechain; + // - Common failures (SECTION_FAILURE_COMMON). + Put(context.Context, *PutRequest) (*PutResponse, error) + // `Delete` invokes `Container` smart contract's `Delete` method and returns + // response immediately. 
After a new block is issued in sidechain, request is + // verified by Inner Ring nodes. After one more block in sidechain, the container + // is added into smart contract storage. + // NOTE: a container deletion leads to the removal of every object in that + // container, regardless of any restrictions on the object removal (e.g. lock/locked + // object would be also removed). + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): \ + // request to remove the container has been sent to the sidechain; + // - Common failures (SECTION_FAILURE_COMMON). + Delete(context.Context, *DeleteRequest) (*DeleteResponse, error) + // Returns container structure from `Container` smart contract storage. + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): \ + // container has been successfully read; + // - Common failures (SECTION_FAILURE_COMMON); + // - **CONTAINER_NOT_FOUND** (3072, SECTION_CONTAINER): \ + // requested container not found. + Get(context.Context, *GetRequest) (*GetResponse, error) + // Returns all owner's containers from 'Container` smart contract' storage. + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): \ + // container list has been successfully read; + // - Common failures (SECTION_FAILURE_COMMON). + List(context.Context, *ListRequest) (*ListResponse, error) + // Invokes 'SetEACL' method of 'Container` smart contract and returns response + // immediately. After one more block in sidechain, changes in an Extended ACL are + // added into smart contract storage. + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): \ + // request to save container eACL has been sent to the sidechain; + // - Common failures (SECTION_FAILURE_COMMON). + SetExtendedACL(context.Context, *SetExtendedACLRequest) (*SetExtendedACLResponse, error) + // Returns Extended ACL table and signature from `Container` smart contract + // storage. 
+ // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): \ + // container eACL has been successfully read; + // - Common failures (SECTION_FAILURE_COMMON); + // - **CONTAINER_NOT_FOUND** (3072, SECTION_CONTAINER): \ + // container not found; + // - **EACL_NOT_FOUND** (3073, SECTION_CONTAINER): \ + // eACL table not found. + GetExtendedACL(context.Context, *GetExtendedACLRequest) (*GetExtendedACLResponse, error) + // Announces the space values used by the container for P2P synchronization. + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): \ + // estimation of used space has been successfully announced; + // - Common failures (SECTION_FAILURE_COMMON). + AnnounceUsedSpace(context.Context, *AnnounceUsedSpaceRequest) (*AnnounceUsedSpaceResponse, error) +} + +// UnimplementedContainerServiceServer should be embedded to have forward compatible implementations. +type UnimplementedContainerServiceServer struct { +} + +func (UnimplementedContainerServiceServer) Put(context.Context, *PutRequest) (*PutResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Put not implemented") +} +func (UnimplementedContainerServiceServer) Delete(context.Context, *DeleteRequest) (*DeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +} +func (UnimplementedContainerServiceServer) Get(context.Context, *GetRequest) (*GetResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") +} +func (UnimplementedContainerServiceServer) List(context.Context, *ListRequest) (*ListResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method List not implemented") +} +func (UnimplementedContainerServiceServer) SetExtendedACL(context.Context, *SetExtendedACLRequest) (*SetExtendedACLResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetExtendedACL not implemented") +} +func (UnimplementedContainerServiceServer) GetExtendedACL(context.Context, 
*GetExtendedACLRequest) (*GetExtendedACLResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetExtendedACL not implemented") +} +func (UnimplementedContainerServiceServer) AnnounceUsedSpace(context.Context, *AnnounceUsedSpaceRequest) (*AnnounceUsedSpaceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AnnounceUsedSpace not implemented") +} + +// UnsafeContainerServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ContainerServiceServer will +// result in compilation errors. +type UnsafeContainerServiceServer interface { + mustEmbedUnimplementedContainerServiceServer() +} + +func RegisterContainerServiceServer(s grpc.ServiceRegistrar, srv ContainerServiceServer) { + s.RegisterService(&ContainerService_ServiceDesc, srv) +} + +func _ContainerService_Put_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PutRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContainerServiceServer).Put(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ContainerService_Put_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContainerServiceServer).Put(ctx, req.(*PutRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ContainerService_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContainerServiceServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ContainerService_Delete_FullMethodName, + } + handler := func(ctx context.Context, req 
interface{}) (interface{}, error) { + return srv.(ContainerServiceServer).Delete(ctx, req.(*DeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ContainerService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContainerServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ContainerService_Get_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContainerServiceServer).Get(ctx, req.(*GetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ContainerService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContainerServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ContainerService_List_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContainerServiceServer).List(ctx, req.(*ListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ContainerService_SetExtendedACL_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetExtendedACLRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContainerServiceServer).SetExtendedACL(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ContainerService_SetExtendedACL_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(ContainerServiceServer).SetExtendedACL(ctx, req.(*SetExtendedACLRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ContainerService_GetExtendedACL_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetExtendedACLRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContainerServiceServer).GetExtendedACL(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ContainerService_GetExtendedACL_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContainerServiceServer).GetExtendedACL(ctx, req.(*GetExtendedACLRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ContainerService_AnnounceUsedSpace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AnnounceUsedSpaceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContainerServiceServer).AnnounceUsedSpace(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ContainerService_AnnounceUsedSpace_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContainerServiceServer).AnnounceUsedSpace(ctx, req.(*AnnounceUsedSpaceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// ContainerService_ServiceDesc is the grpc.ServiceDesc for ContainerService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ContainerService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "neo.fs.v2.container.ContainerService", + HandlerType: (*ContainerServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Put", + Handler: _ContainerService_Put_Handler, + }, + { + MethodName: "Delete", + Handler: _ContainerService_Delete_Handler, + }, + { + MethodName: "Get", + Handler: _ContainerService_Get_Handler, + }, + { + MethodName: "List", + Handler: _ContainerService_List_Handler, + }, + { + MethodName: "SetExtendedACL", + Handler: _ContainerService_SetExtendedACL_Handler, + }, + { + MethodName: "GetExtendedACL", + Handler: _ContainerService_GetExtendedACL_Handler, + }, + { + MethodName: "AnnounceUsedSpace", + Handler: _ContainerService_AnnounceUsedSpace_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "container/grpc/service.proto", +} diff --git a/api/container/types.pb.go b/api/container/types.pb.go new file mode 100644 index 000000000..5fcf2469e --- /dev/null +++ b/api/container/types.pb.go @@ -0,0 +1,336 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.32.0 +// protoc v4.25.1 +// source: container/grpc/types.proto + +package container + +import ( + reflect "reflect" + sync "sync" + + "github.com/nspcc-dev/neofs-sdk-go/api/netmap" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Container is a structure that defines object placement behaviour. Objects can +// be stored only within containers. 
They define placement rule, attributes and +// access control information. An ID of a container is a 32 byte long SHA256 hash +// of stable-marshalled container message. +type Container struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Container format version. Effectively, the version of API library used to + // create the container. + Version *refs.Version `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Identifier of the container owner + OwnerId *refs.OwnerID `protobuf:"bytes,2,opt,name=owner_id,json=ownerID,proto3" json:"owner_id,omitempty"` + // Nonce is a 16 byte UUIDv4, used to avoid collisions of `ContainerID`s + Nonce []byte `protobuf:"bytes,3,opt,name=nonce,proto3" json:"nonce,omitempty"` + // `BasicACL` contains access control rules for the owner, system and others groups, + // as well as permission bits for `BearerToken` and `Extended ACL` + BasicAcl uint32 `protobuf:"varint,4,opt,name=basic_acl,json=basicACL,proto3" json:"basic_acl,omitempty"` + // Attributes represent immutable container's meta data + Attributes []*Container_Attribute `protobuf:"bytes,5,rep,name=attributes,proto3" json:"attributes,omitempty"` + // Placement policy for the object inside the container + PlacementPolicy *netmap.PlacementPolicy `protobuf:"bytes,6,opt,name=placement_policy,json=placementPolicy,proto3" json:"placement_policy,omitempty"` +} + +func (x *Container) Reset() { + *x = Container{} + if protoimpl.UnsafeEnabled { + mi := &file_container_grpc_types_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Container) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Container) ProtoMessage() {} + +func (x *Container) ProtoReflect() protoreflect.Message { + mi := &file_container_grpc_types_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Container.ProtoReflect.Descriptor instead. +func (*Container) Descriptor() ([]byte, []int) { + return file_container_grpc_types_proto_rawDescGZIP(), []int{0} +} + +func (x *Container) GetVersion() *refs.Version { + if x != nil { + return x.Version + } + return nil +} + +func (x *Container) GetOwnerId() *refs.OwnerID { + if x != nil { + return x.OwnerId + } + return nil +} + +func (x *Container) GetNonce() []byte { + if x != nil { + return x.Nonce + } + return nil +} + +func (x *Container) GetBasicAcl() uint32 { + if x != nil { + return x.BasicAcl + } + return 0 +} + +func (x *Container) GetAttributes() []*Container_Attribute { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *Container) GetPlacementPolicy() *netmap.PlacementPolicy { + if x != nil { + return x.PlacementPolicy + } + return nil +} + +// `Attribute` is a user-defined Key-Value metadata pair attached to the +// container. Container attributes are immutable. They are set at the moment of +// container creation and can never be added or updated. +// +// Key name must be a container-unique valid UTF-8 string. Value can't be +// empty. Containers with duplicated attribute names or attributes with empty +// values will be considered invalid. +// +// There are some "well-known" attributes affecting system behaviour: +// +// - __NEOFS__SUBNET \ +// DEPRECATED. Was used for a string ID of a container's storage subnet. +// Currently doesn't affect anything. +// - __NEOFS__NAME \ +// String of a human-friendly container name registered as a domain in +// NNS contract. +// - __NEOFS__ZONE \ +// String of a zone for `__NEOFS__NAME`. Used as a TLD of a domain name in NNS +// contract. If no zone is specified, use default zone: `container`. 
+// - __NEOFS__DISABLE_HOMOMORPHIC_HASHING \ +// Disables homomorphic hashing for the container if the value equals "true" string. +// Any other values are interpreted as missing attribute. Container could be +// accepted in a NeoFS network only if the global network hashing configuration +// value corresponds with that attribute's value. After container inclusion, network +// setting is ignored. +// +// And some well-known attributes used by applications only: +// +// - Name \ +// Human-friendly name +// - Timestamp \ +// User-defined local time of container creation in Unix Timestamp format +type Container_Attribute struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Attribute name key + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // Attribute value + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *Container_Attribute) Reset() { + *x = Container_Attribute{} + if protoimpl.UnsafeEnabled { + mi := &file_container_grpc_types_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Container_Attribute) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Container_Attribute) ProtoMessage() {} + +func (x *Container_Attribute) ProtoReflect() protoreflect.Message { + mi := &file_container_grpc_types_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Container_Attribute.ProtoReflect.Descriptor instead. 
+func (*Container_Attribute) Descriptor() ([]byte, []int) { + return file_container_grpc_types_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Container_Attribute) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *Container_Attribute) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +var File_container_grpc_types_proto protoreflect.FileDescriptor + +var file_container_grpc_types_proto_rawDesc = []byte{ + 0x0a, 0x1a, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2f, 0x67, 0x72, 0x70, 0x63, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x6e, 0x65, + 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x1a, 0x17, 0x6e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x72, 0x65, 0x66, 0x73, + 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0xf2, 0x02, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, + 0x31, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, + 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x08, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x52, 0x07, 0x6f, + 0x77, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, + 0x62, 0x61, 0x73, 0x69, 0x63, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x04, 
0x20, 0x01, 0x28, 0x0d, 0x52, + 0x08, 0x62, 0x61, 0x73, 0x69, 0x63, 0x41, 0x43, 0x4c, 0x12, 0x48, 0x0a, 0x0a, 0x61, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, + 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x41, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x73, 0x12, 0x4c, 0x0a, 0x10, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, + 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6e, 0x65, 0x74, 0x6d, 0x61, 0x70, + 0x2e, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x52, 0x0f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x1a, 0x33, 0x0a, 0x09, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x5f, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6e, 0x73, 0x70, 0x63, 0x63, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x6e, + 0x65, 0x6f, 0x66, 0x73, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x3b, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0xaa, 0x02, 0x1d, 0x4e, 0x65, 0x6f, 0x2e, 0x46, 0x69, + 0x6c, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x41, 0x50, 0x49, 0x2e, 0x43, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + 
file_container_grpc_types_proto_rawDescOnce sync.Once + file_container_grpc_types_proto_rawDescData = file_container_grpc_types_proto_rawDesc +) + +func file_container_grpc_types_proto_rawDescGZIP() []byte { + file_container_grpc_types_proto_rawDescOnce.Do(func() { + file_container_grpc_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_container_grpc_types_proto_rawDescData) + }) + return file_container_grpc_types_proto_rawDescData +} + +var file_container_grpc_types_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_container_grpc_types_proto_goTypes = []interface{}{ + (*Container)(nil), // 0: neo.fs.v2.container.Container + (*Container_Attribute)(nil), // 1: neo.fs.v2.container.Container.Attribute + (*refs.Version)(nil), // 2: neo.fs.v2.refs.Version + (*refs.OwnerID)(nil), // 3: neo.fs.v2.refs.OwnerID + (*netmap.PlacementPolicy)(nil), // 4: neo.fs.v2.netmap.PlacementPolicy +} +var file_container_grpc_types_proto_depIdxs = []int32{ + 2, // 0: neo.fs.v2.container.Container.version:type_name -> neo.fs.v2.refs.Version + 3, // 1: neo.fs.v2.container.Container.owner_id:type_name -> neo.fs.v2.refs.OwnerID + 1, // 2: neo.fs.v2.container.Container.attributes:type_name -> neo.fs.v2.container.Container.Attribute + 4, // 3: neo.fs.v2.container.Container.placement_policy:type_name -> neo.fs.v2.netmap.PlacementPolicy + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_container_grpc_types_proto_init() } +func file_container_grpc_types_proto_init() { + if File_container_grpc_types_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_container_grpc_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Container); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + 
case 2: + return &v.unknownFields + default: + return nil + } + } + file_container_grpc_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Container_Attribute); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_container_grpc_types_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_container_grpc_types_proto_goTypes, + DependencyIndexes: file_container_grpc_types_proto_depIdxs, + MessageInfos: file_container_grpc_types_proto_msgTypes, + }.Build() + File_container_grpc_types_proto = out.File + file_container_grpc_types_proto_rawDesc = nil + file_container_grpc_types_proto_goTypes = nil + file_container_grpc_types_proto_depIdxs = nil +} diff --git a/api/link/types.pb.go b/api/link/types.pb.go new file mode 100644 index 000000000..a6ff4eb55 --- /dev/null +++ b/api/link/types.pb.go @@ -0,0 +1,241 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.32.0 +// protoc v4.25.1 +// source: link/grpc/types.proto + +package link + +import ( + reflect "reflect" + sync "sync" + + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Link is a payload of helper objects that contain the full list of the split +// chain objects' IDs. It is created only after the whole split chain is known +// and signed. 
This object is the only object that refers to every "child object" +// ID. It is NOT required for the original object assembling. It MUST have ALL +// the "child objects" IDs. Child objects MUST be ordered according to the +// original payload split, meaning the first payload part holder MUST be placed +// at the first place in the corresponding link object. Sizes MUST NOT be omitted +// and MUST be a real object payload size in bytes. +type Link struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Full list of the "child" object descriptors. + Children []*Link_MeasuredObject `protobuf:"bytes,1,rep,name=children,proto3" json:"children,omitempty"` +} + +func (x *Link) Reset() { + *x = Link{} + if protoimpl.UnsafeEnabled { + mi := &file_link_grpc_types_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Link) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Link) ProtoMessage() {} + +func (x *Link) ProtoReflect() protoreflect.Message { + mi := &file_link_grpc_types_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Link.ProtoReflect.Descriptor instead. +func (*Link) Descriptor() ([]byte, []int) { + return file_link_grpc_types_proto_rawDescGZIP(), []int{0} +} + +func (x *Link) GetChildren() []*Link_MeasuredObject { + if x != nil { + return x.Children + } + return nil +} + +// Object ID with its object's payload size. +type Link_MeasuredObject struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Object ID. + Id *refs.ObjectID `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Object size in bytes. 
+ Size uint32 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"` +} + +func (x *Link_MeasuredObject) Reset() { + *x = Link_MeasuredObject{} + if protoimpl.UnsafeEnabled { + mi := &file_link_grpc_types_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Link_MeasuredObject) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Link_MeasuredObject) ProtoMessage() {} + +func (x *Link_MeasuredObject) ProtoReflect() protoreflect.Message { + mi := &file_link_grpc_types_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Link_MeasuredObject.ProtoReflect.Descriptor instead. +func (*Link_MeasuredObject) Descriptor() ([]byte, []int) { + return file_link_grpc_types_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Link_MeasuredObject) GetId() *refs.ObjectID { + if x != nil { + return x.Id + } + return nil +} + +func (x *Link_MeasuredObject) GetSize() uint32 { + if x != nil { + return x.Size + } + return 0 +} + +var File_link_grpc_types_proto protoreflect.FileDescriptor + +var file_link_grpc_types_proto_rawDesc = []byte{ + 0x0a, 0x15, 0x6c, 0x69, 0x6e, 0x6b, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, + 0x76, 0x32, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x1a, 0x15, 0x72, 0x65, 0x66, 0x73, 0x2f, 0x67, 0x72, + 0x70, 0x63, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x97, + 0x01, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x3f, 0x0a, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, + 0x72, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, + 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 
0x2e, + 0x4d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, + 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x1a, 0x4e, 0x0a, 0x0e, 0x4d, 0x65, 0x61, 0x73, + 0x75, 0x72, 0x65, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x28, 0x0a, 0x02, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, + 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x44, + 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x42, 0x50, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6e, 0x73, 0x70, 0x63, 0x63, 0x2d, 0x64, 0x65, 0x76, + 0x2f, 0x6e, 0x65, 0x6f, 0x66, 0x73, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, + 0x2f, 0x6c, 0x69, 0x6e, 0x6b, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x3b, 0x6c, 0x69, 0x6e, 0x6b, 0xaa, + 0x02, 0x18, 0x4e, 0x65, 0x6f, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x41, 0x50, 0x49, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_link_grpc_types_proto_rawDescOnce sync.Once + file_link_grpc_types_proto_rawDescData = file_link_grpc_types_proto_rawDesc +) + +func file_link_grpc_types_proto_rawDescGZIP() []byte { + file_link_grpc_types_proto_rawDescOnce.Do(func() { + file_link_grpc_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_link_grpc_types_proto_rawDescData) + }) + return file_link_grpc_types_proto_rawDescData +} + +var file_link_grpc_types_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_link_grpc_types_proto_goTypes = []interface{}{ + (*Link)(nil), // 0: neo.fs.v2.link.Link + (*Link_MeasuredObject)(nil), // 1: neo.fs.v2.link.Link.MeasuredObject + (*refs.ObjectID)(nil), // 2: neo.fs.v2.refs.ObjectID +} +var file_link_grpc_types_proto_depIdxs = []int32{ + 1, // 0: 
neo.fs.v2.link.Link.children:type_name -> neo.fs.v2.link.Link.MeasuredObject + 2, // 1: neo.fs.v2.link.Link.MeasuredObject.id:type_name -> neo.fs.v2.refs.ObjectID + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_link_grpc_types_proto_init() } +func file_link_grpc_types_proto_init() { + if File_link_grpc_types_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_link_grpc_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Link); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_link_grpc_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Link_MeasuredObject); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_link_grpc_types_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_link_grpc_types_proto_goTypes, + DependencyIndexes: file_link_grpc_types_proto_depIdxs, + MessageInfos: file_link_grpc_types_proto_msgTypes, + }.Build() + File_link_grpc_types_proto = out.File + file_link_grpc_types_proto_rawDesc = nil + file_link_grpc_types_proto_goTypes = nil + file_link_grpc_types_proto_depIdxs = nil +} diff --git a/api/lock/types.pb.go b/api/lock/types.pb.go new file mode 100644 index 000000000..ad1ade340 --- /dev/null +++ b/api/lock/types.pb.go @@ -0,0 +1,162 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.32.0 +// protoc v4.25.1 +// source: lock/grpc/types.proto + +package lock + +import ( + reflect "reflect" + sync "sync" + + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Lock objects protects a list of objects from being deleted. The lifetime of a +// lock object is limited similar to regular objects in +// `__NEOFS__EXPIRATION_EPOCH` attribute. Lock object MUST have expiration epoch. +// It is impossible to delete a lock object via ObjectService.Delete RPC call. +// Deleting a container containing lock/locked objects results in their removal +// too, regardless of their expiration epochs. +type Lock struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // List of objects to lock. Must not be empty or carry empty IDs. + // All members must be of the `REGULAR` type. 
+ Members []*refs.ObjectID `protobuf:"bytes,1,rep,name=members,proto3" json:"members,omitempty"` +} + +func (x *Lock) Reset() { + *x = Lock{} + if protoimpl.UnsafeEnabled { + mi := &file_lock_grpc_types_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Lock) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Lock) ProtoMessage() {} + +func (x *Lock) ProtoReflect() protoreflect.Message { + mi := &file_lock_grpc_types_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Lock.ProtoReflect.Descriptor instead. +func (*Lock) Descriptor() ([]byte, []int) { + return file_lock_grpc_types_proto_rawDescGZIP(), []int{0} +} + +func (x *Lock) GetMembers() []*refs.ObjectID { + if x != nil { + return x.Members + } + return nil +} + +var File_lock_grpc_types_proto protoreflect.FileDescriptor + +var file_lock_grpc_types_proto_rawDesc = []byte{ + 0x0a, 0x15, 0x6c, 0x6f, 0x63, 0x6b, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, + 0x76, 0x32, 0x2e, 0x6c, 0x6f, 0x63, 0x6b, 0x1a, 0x15, 0x72, 0x65, 0x66, 0x73, 0x2f, 0x67, 0x72, + 0x70, 0x63, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x3a, + 0x0a, 0x04, 0x4c, 0x6f, 0x63, 0x6b, 0x12, 0x32, 0x0a, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, + 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, + 0x44, 0x52, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x42, 0x50, 0x5a, 0x33, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6e, 0x73, 0x70, 0x63, 0x63, 0x2d, 
0x64, + 0x65, 0x76, 0x2f, 0x6e, 0x65, 0x6f, 0x66, 0x73, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x67, 0x6f, 0x2f, + 0x76, 0x32, 0x2f, 0x6c, 0x6f, 0x63, 0x6b, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x3b, 0x6c, 0x6f, 0x63, + 0x6b, 0xaa, 0x02, 0x18, 0x4e, 0x65, 0x6f, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x41, 0x50, 0x49, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_lock_grpc_types_proto_rawDescOnce sync.Once + file_lock_grpc_types_proto_rawDescData = file_lock_grpc_types_proto_rawDesc +) + +func file_lock_grpc_types_proto_rawDescGZIP() []byte { + file_lock_grpc_types_proto_rawDescOnce.Do(func() { + file_lock_grpc_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_lock_grpc_types_proto_rawDescData) + }) + return file_lock_grpc_types_proto_rawDescData +} + +var file_lock_grpc_types_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_lock_grpc_types_proto_goTypes = []interface{}{ + (*Lock)(nil), // 0: neo.fs.v2.lock.Lock + (*refs.ObjectID)(nil), // 1: neo.fs.v2.refs.ObjectID +} +var file_lock_grpc_types_proto_depIdxs = []int32{ + 1, // 0: neo.fs.v2.lock.Lock.members:type_name -> neo.fs.v2.refs.ObjectID + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_lock_grpc_types_proto_init() } +func file_lock_grpc_types_proto_init() { + if File_lock_grpc_types_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_lock_grpc_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Lock); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + 
GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_lock_grpc_types_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_lock_grpc_types_proto_goTypes, + DependencyIndexes: file_lock_grpc_types_proto_depIdxs, + MessageInfos: file_lock_grpc_types_proto_msgTypes, + }.Build() + File_lock_grpc_types_proto = out.File + file_lock_grpc_types_proto_rawDesc = nil + file_lock_grpc_types_proto_goTypes = nil + file_lock_grpc_types_proto_depIdxs = nil +} diff --git a/api/netmap/encoding.go b/api/netmap/encoding.go new file mode 100644 index 000000000..62703da62 --- /dev/null +++ b/api/netmap/encoding.go @@ -0,0 +1,363 @@ +package netmap + +import ( + "github.com/nspcc-dev/neofs-sdk-go/internal/proto" +) + +const ( + _ = iota + fieldReplicaCount + fieldReplicaSelector +) + +func (x *Replica) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeVarint(fieldReplicaCount, x.Count) + + proto.SizeBytes(fieldReplicaSelector, x.Selector) + } + return sz +} + +func (x *Replica) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalVarint(b, fieldReplicaCount, x.Count) + proto.MarshalBytes(b[off:], fieldReplicaSelector, x.Selector) + } +} + +const ( + _ = iota + fieldSelectorName + fieldSelectorCount + fieldSelectorClause + fieldSelectorAttribute + fieldSelectorFilter +) + +func (x *Selector) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeBytes(fieldSelectorName, x.Name) + + proto.SizeVarint(fieldSelectorCount, x.Count) + + proto.SizeVarint(fieldSelectorClause, int32(x.Clause)) + + proto.SizeBytes(fieldSelectorAttribute, x.Attribute) + + proto.SizeBytes(fieldSelectorFilter, x.Filter) + } + return sz +} + +func (x *Selector) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalBytes(b, fieldSelectorName, x.Name) + off += proto.MarshalVarint(b[off:], fieldSelectorCount, x.Count) + off += proto.MarshalVarint(b[off:], fieldSelectorClause, int32(x.Clause)) + off += 
proto.MarshalBytes(b[off:], fieldSelectorAttribute, x.Attribute) + proto.MarshalBytes(b[off:], fieldSelectorFilter, x.Filter) + } +} + +const ( + _ = iota + fieldFilterName + fieldFilterKey + fieldFilterOp + fieldFilterVal + fieldFilterSubs +) + +func (x *Filter) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeBytes(fieldFilterName, x.Name) + + proto.SizeBytes(fieldFilterKey, x.Key) + + proto.SizeVarint(fieldFilterOp, int32(x.Op)) + + proto.SizeBytes(fieldFilterVal, x.Value) + for i := range x.Filters { + sz += proto.SizeNested(fieldFilterSubs, x.Filters[i]) + } + } + return sz +} + +func (x *Filter) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalBytes(b, fieldFilterName, x.Name) + off += proto.MarshalBytes(b[off:], fieldFilterKey, x.Key) + off += proto.MarshalVarint(b[off:], fieldFilterOp, int32(x.Op)) + off += proto.MarshalBytes(b[off:], fieldFilterVal, x.Value) + for i := range x.Filters { + off += proto.MarshalNested(b[off:], fieldFilterSubs, x.Filters[i]) + } + } +} + +const ( + _ = iota + fieldPolicyReplicas + fieldPolicyBackupFactor + fieldPolicySelectors + fieldPolicyFilters + fieldPolicySubnet +) + +func (x *PlacementPolicy) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeVarint(fieldPolicyBackupFactor, x.ContainerBackupFactor) + + proto.SizeNested(fieldPolicySubnet, x.SubnetId) + for i := range x.Replicas { + sz += proto.SizeNested(fieldPolicyReplicas, x.Replicas[i]) + } + for i := range x.Selectors { + sz += proto.SizeNested(fieldPolicySelectors, x.Selectors[i]) + } + for i := range x.Filters { + sz += proto.SizeNested(fieldPolicyFilters, x.Filters[i]) + } + } + return sz +} + +func (x *PlacementPolicy) MarshalStable(b []byte) { + if x != nil { + var off int + for i := range x.Replicas { + off += proto.MarshalNested(b[off:], fieldPolicyReplicas, x.Replicas[i]) + } + off += proto.MarshalVarint(b[off:], fieldPolicyBackupFactor, x.ContainerBackupFactor) + for i := range x.Selectors { + off += 
proto.MarshalNested(b[off:], fieldPolicySelectors, x.Selectors[i]) + } + for i := range x.Filters { + off += proto.MarshalNested(b[off:], fieldPolicyFilters, x.Filters[i]) + } + proto.MarshalNested(b[off:], fieldPolicySubnet, x.SubnetId) + } +} + +const ( + _ = iota + fieldNetPrmKey + fieldNetPrmVal +) + +func (x *NetworkConfig_Parameter) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeBytes(fieldNetPrmKey, x.Key) + + proto.SizeBytes(fieldNetPrmVal, x.Value) + } + return sz +} + +func (x *NetworkConfig_Parameter) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalBytes(b, fieldNetPrmKey, x.Key) + proto.MarshalBytes(b[off:], fieldNetPrmVal, x.Value) + } +} + +const ( + _ = iota + fieldNetConfigPrms +) + +func (x *NetworkConfig) MarshaledSize() int { + var sz int + if x != nil { + for i := range x.Parameters { + sz += proto.SizeNested(fieldNetConfigPrms, x.Parameters[i]) + } + } + return sz +} + +func (x *NetworkConfig) MarshalStable(b []byte) { + if x != nil { + var off int + for i := range x.Parameters { + off += proto.MarshalNested(b[off:], fieldNetConfigPrms, x.Parameters[i]) + } + } +} + +const ( + _ = iota + fieldNetInfoCurEpoch + fieldNetInfoMagic + fieldNetInfoMSPerBlock + fieldNetInfoConfig +) + +func (x *NetworkInfo) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeVarint(fieldNetInfoCurEpoch, x.CurrentEpoch) + + proto.SizeVarint(fieldNetInfoMagic, x.MagicNumber) + + proto.SizeVarint(fieldNetInfoMSPerBlock, x.MsPerBlock) + + proto.SizeNested(fieldNetInfoConfig, x.NetworkConfig) + } + return sz +} + +func (x *NetworkInfo) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalVarint(b, fieldNetInfoCurEpoch, x.CurrentEpoch) + off += proto.MarshalVarint(b[off:], fieldNetInfoMagic, x.MagicNumber) + off += proto.MarshalVarint(b[off:], fieldNetInfoMSPerBlock, x.MsPerBlock) + proto.MarshalNested(b[off:], fieldNetInfoConfig, x.NetworkConfig) + } +} + +const ( + _ = iota + fieldNumNodeAttrKey + 
fieldNumNodeAttrVal + fieldNumNodeAttrParents +) + +func (x *NodeInfo_Attribute) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeBytes(fieldNumNodeAttrKey, x.Key) + + proto.SizeBytes(fieldNumNodeAttrVal, x.Value) + + proto.SizeRepeatedBytes(fieldNumNodeAttrParents, x.Parents) + } + return sz +} + +func (x *NodeInfo_Attribute) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalBytes(b, fieldNumNodeAttrKey, x.Key) + off += proto.MarshalBytes(b[off:], fieldNumNodeAttrVal, x.Value) + proto.MarshalRepeatedBytes(b[off:], fieldNumNodeAttrParents, x.Parents) + } +} + +const ( + _ = iota + fieldNodeInfoPubKey + fieldNodeInfoAddresses + fieldNodeInfoAttributes + fieldNodeInfoState +) + +func (x *NodeInfo) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeBytes(fieldNodeInfoPubKey, x.PublicKey) + + proto.SizeRepeatedBytes(fieldNodeInfoAddresses, x.Addresses) + + proto.SizeVarint(fieldNodeInfoState, int32(x.State)) + for i := range x.Attributes { + sz += proto.SizeNested(fieldNodeInfoAttributes, x.Attributes[i]) + } + } + return sz +} + +func (x *NodeInfo) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalBytes(b, fieldNodeInfoPubKey, x.PublicKey) + off += proto.MarshalRepeatedBytes(b[off:], fieldNodeInfoAddresses, x.Addresses) + for i := range x.Attributes { + off += proto.MarshalNested(b[off:], fieldNodeInfoAttributes, x.Attributes[i]) + } + proto.MarshalVarint(b[off:], fieldNodeInfoState, int32(x.State)) + } +} + +const ( + _ = iota + fieldNetmapEpoch + fieldNetmapNodes +) + +func (x *Netmap) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeVarint(fieldNetmapEpoch, x.Epoch) + for i := range x.Nodes { + sz += proto.SizeNested(fieldNetmapNodes, x.Nodes[i]) + } + } + return sz +} + +func (x *Netmap) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalVarint(b, fieldNetmapEpoch, x.Epoch) + for i := range x.Nodes { + off += proto.MarshalNested(b[off:], fieldNetmapNodes, x.Nodes[i]) + 
} + } +} + +func (x *LocalNodeInfoRequest_Body) MarshaledSize() int { return 0 } +func (x *LocalNodeInfoRequest_Body) MarshalStable([]byte) {} + +const ( + _ = iota + fieldNodeInfoRespVersion + fieldNodeInfoRespInfo +) + +func (x *LocalNodeInfoResponse_Body) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldNodeInfoRespVersion, x.Version) + + proto.SizeNested(fieldNodeInfoRespInfo, x.NodeInfo) + } + return sz +} + +func (x *LocalNodeInfoResponse_Body) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalNested(b, fieldNodeInfoRespVersion, x.Version) + proto.MarshalNested(b[off:], fieldNodeInfoRespInfo, x.NodeInfo) + } +} + +func (x *NetworkInfoRequest_Body) MarshaledSize() int { return 0 } +func (x *NetworkInfoRequest_Body) MarshalStable([]byte) {} + +const ( + _ = iota + fieldNetInfoRespInfo +) + +func (x *NetworkInfoResponse_Body) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldNetInfoRespInfo, x.NetworkInfo) + } + return sz +} + +func (x *NetworkInfoResponse_Body) MarshalStable(b []byte) { + if x != nil { + proto.MarshalNested(b, fieldNetInfoRespInfo, x.NetworkInfo) + } +} + +func (x *NetmapSnapshotRequest_Body) MarshaledSize() int { return 0 } +func (x *NetmapSnapshotRequest_Body) MarshalStable([]byte) {} + +const ( + _ = iota + fieldNetmapRespNetmap +) + +func (x *NetmapSnapshotResponse_Body) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldNetmapRespNetmap, x.Netmap) + } + return sz +} + +func (x *NetmapSnapshotResponse_Body) MarshalStable(b []byte) { + if x != nil { + proto.MarshalNested(b, fieldNetmapRespNetmap, x.Netmap) + } +} diff --git a/api/netmap/encoding_test.go b/api/netmap/encoding_test.go new file mode 100644 index 000000000..a213ab8e2 --- /dev/null +++ b/api/netmap/encoding_test.go @@ -0,0 +1,125 @@ +package netmap_test + +import ( + "testing" + + "github.com/nspcc-dev/neofs-sdk-go/api/netmap" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + 
"github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" +) + +func TestLocalNodeInfoRequest_Body(t *testing.T) { + var v netmap.LocalNodeInfoRequest_Body + require.Zero(t, v.MarshaledSize()) + require.NotPanics(t, func() { v.MarshalStable(nil) }) + b := []byte("not_a_protobuf") + v.MarshalStable(b) + require.EqualValues(t, "not_a_protobuf", b) +} + +func TestLocalNodeInfoResponse_Body(t *testing.T) { + v := &netmap.LocalNodeInfoResponse_Body{ + Version: &refs.Version{Major: 1, Minor: 2}, + NodeInfo: &netmap.NodeInfo{ + PublicKey: []byte("any_key"), + Addresses: []string{"addr1", "addr2"}, + Attributes: []*netmap.NodeInfo_Attribute{ + {Key: "key1", Value: "val1", Parents: []string{"par1", "par2"}}, + {Key: "key2", Value: "val2", Parents: []string{"par3", "par4"}}, + }, + State: 3, + }, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res netmap.LocalNodeInfoResponse_Body + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.Version, res.Version) + require.Equal(t, v.NodeInfo, res.NodeInfo) +} + +func TestNetworkInfoRequest_Body(t *testing.T) { + var v netmap.NetworkInfoRequest_Body + require.Zero(t, v.MarshaledSize()) + require.NotPanics(t, func() { v.MarshalStable(nil) }) + b := []byte("not_a_protobuf") + v.MarshalStable(b) + require.EqualValues(t, "not_a_protobuf", b) +} + +func TestNetworkInfoResponse_Body(t *testing.T) { + v := &netmap.NetworkInfoResponse_Body{ + NetworkInfo: &netmap.NetworkInfo{ + CurrentEpoch: 1, MagicNumber: 2, MsPerBlock: 3, + NetworkConfig: &netmap.NetworkConfig{ + Parameters: []*netmap.NetworkConfig_Parameter{ + {Key: []byte("key1"), Value: []byte("val1")}, + {Key: []byte("key2"), Value: []byte("val2")}, + }, + }, + }, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res netmap.NetworkInfoResponse_Body + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + 
require.Empty(t, res.ProtoReflect().GetUnknown())
+	require.Equal(t, v.NetworkInfo, res.NetworkInfo)
+}
+
+func TestNetmapSnapshotRequest_Body(t *testing.T) {
+	// NOTE(review): was `netmap.NetworkInfoRequest_Body` — a copy-paste bug that
+	// left NetmapSnapshotRequest_Body untested; use the type under test.
+	var v netmap.NetmapSnapshotRequest_Body
+	require.Zero(t, v.MarshaledSize())
+	require.NotPanics(t, func() { v.MarshalStable(nil) })
+	b := []byte("not_a_protobuf")
+	v.MarshalStable(b)
+	require.EqualValues(t, "not_a_protobuf", b)
+}
+
+func TestNetmapSnapshotResponse_Body(t *testing.T) {
+	v := &netmap.NetmapSnapshotResponse_Body{
+		Netmap: &netmap.Netmap{
+			Epoch: 1,
+			Nodes: []*netmap.NodeInfo{
+				{
+					PublicKey: []byte("any_key1"),
+					Addresses: []string{"addr1", "addr2"},
+					Attributes: []*netmap.NodeInfo_Attribute{
+						{Key: "key1", Value: "val1", Parents: []string{"par1", "par2"}},
+						{Key: "key2", Value: "val2", Parents: []string{"par3", "par4"}},
+					},
+					State: 2,
+				},
+				{
+					PublicKey: []byte("any_key2"),
+					Addresses: []string{"addr3", "addr4"},
+					Attributes: []*netmap.NodeInfo_Attribute{
+						{Key: "key3", Value: "val3", Parents: []string{"par5", "par6"}},
+						{Key: "key4", Value: "val4", Parents: []string{"par7", "par8"}},
+					},
+					State: 3,
+				},
+			},
+		},
+	}
+
+	sz := v.MarshaledSize()
+	b := make([]byte, sz)
+	v.MarshalStable(b)
+
+	var res netmap.NetmapSnapshotResponse_Body
+	err := proto.Unmarshal(b, &res)
+	require.NoError(t, err)
+	require.Empty(t, res.ProtoReflect().GetUnknown())
+	require.Equal(t, v.Netmap, res.Netmap)
+}
diff --git a/api/netmap/service.pb.go b/api/netmap/service.pb.go
new file mode 100644
index 000000000..86fa6fb80
--- /dev/null
+++ b/api/netmap/service.pb.go
@@ -0,0 +1,1108 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions: +// protoc-gen-go v1.32.0 +// protoc v4.25.1 +// source: netmap/grpc/service.proto + +package netmap + +import ( + reflect "reflect" + sync "sync" + + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/session" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Get NodeInfo structure directly from a particular node +type LocalNodeInfoRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of the LocalNodeInfo request message + Body *LocalNodeInfoRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries request meta information. Header data is used only to regulate + // message transport and does not affect request execution. + MetaHeader *session.RequestMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries request verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. 
+ VerifyHeader *session.RequestVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *LocalNodeInfoRequest) Reset() { + *x = LocalNodeInfoRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_netmap_grpc_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocalNodeInfoRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalNodeInfoRequest) ProtoMessage() {} + +func (x *LocalNodeInfoRequest) ProtoReflect() protoreflect.Message { + mi := &file_netmap_grpc_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocalNodeInfoRequest.ProtoReflect.Descriptor instead. +func (*LocalNodeInfoRequest) Descriptor() ([]byte, []int) { + return file_netmap_grpc_service_proto_rawDescGZIP(), []int{0} +} + +func (x *LocalNodeInfoRequest) GetBody() *LocalNodeInfoRequest_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *LocalNodeInfoRequest) GetMetaHeader() *session.RequestMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *LocalNodeInfoRequest) GetVerifyHeader() *session.RequestVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// Local Node Info, including API Version in use +type LocalNodeInfoResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of the balance response message. + Body *LocalNodeInfoResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries response meta information. Header data is used only to regulate + // message transport and does not affect response execution. 
+ MetaHeader *session.ResponseMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries response verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. + VerifyHeader *session.ResponseVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *LocalNodeInfoResponse) Reset() { + *x = LocalNodeInfoResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_netmap_grpc_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocalNodeInfoResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalNodeInfoResponse) ProtoMessage() {} + +func (x *LocalNodeInfoResponse) ProtoReflect() protoreflect.Message { + mi := &file_netmap_grpc_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocalNodeInfoResponse.ProtoReflect.Descriptor instead. +func (*LocalNodeInfoResponse) Descriptor() ([]byte, []int) { + return file_netmap_grpc_service_proto_rawDescGZIP(), []int{1} +} + +func (x *LocalNodeInfoResponse) GetBody() *LocalNodeInfoResponse_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *LocalNodeInfoResponse) GetMetaHeader() *session.ResponseMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *LocalNodeInfoResponse) GetVerifyHeader() *session.ResponseVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// Get NetworkInfo structure with the network view from a particular node. 
+type NetworkInfoRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of the NetworkInfo request message + Body *NetworkInfoRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries request meta information. Header data is used only to regulate + // message transport and does not affect request execution. + MetaHeader *session.RequestMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries request verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. + VerifyHeader *session.RequestVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *NetworkInfoRequest) Reset() { + *x = NetworkInfoRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_netmap_grpc_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NetworkInfoRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NetworkInfoRequest) ProtoMessage() {} + +func (x *NetworkInfoRequest) ProtoReflect() protoreflect.Message { + mi := &file_netmap_grpc_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NetworkInfoRequest.ProtoReflect.Descriptor instead. 
+func (*NetworkInfoRequest) Descriptor() ([]byte, []int) { + return file_netmap_grpc_service_proto_rawDescGZIP(), []int{2} +} + +func (x *NetworkInfoRequest) GetBody() *NetworkInfoRequest_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *NetworkInfoRequest) GetMetaHeader() *session.RequestMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *NetworkInfoRequest) GetVerifyHeader() *session.RequestVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// Response with NetworkInfo structure including current epoch and +// sidechain magic number. +type NetworkInfoResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of the NetworkInfo response message. + Body *NetworkInfoResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries response meta information. Header data is used only to regulate + // message transport and does not affect response execution. + MetaHeader *session.ResponseMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries response verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. 
+ VerifyHeader *session.ResponseVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *NetworkInfoResponse) Reset() { + *x = NetworkInfoResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_netmap_grpc_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NetworkInfoResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NetworkInfoResponse) ProtoMessage() {} + +func (x *NetworkInfoResponse) ProtoReflect() protoreflect.Message { + mi := &file_netmap_grpc_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NetworkInfoResponse.ProtoReflect.Descriptor instead. +func (*NetworkInfoResponse) Descriptor() ([]byte, []int) { + return file_netmap_grpc_service_proto_rawDescGZIP(), []int{3} +} + +func (x *NetworkInfoResponse) GetBody() *NetworkInfoResponse_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *NetworkInfoResponse) GetMetaHeader() *session.ResponseMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *NetworkInfoResponse) GetVerifyHeader() *session.ResponseVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// Get netmap snapshot request +type NetmapSnapshotRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of get netmap snapshot request message. + Body *NetmapSnapshotRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries request meta information. Header data is used only to regulate + // message transport and does not affect request execution. 
+ MetaHeader *session.RequestMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries request verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. + VerifyHeader *session.RequestVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *NetmapSnapshotRequest) Reset() { + *x = NetmapSnapshotRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_netmap_grpc_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NetmapSnapshotRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NetmapSnapshotRequest) ProtoMessage() {} + +func (x *NetmapSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_netmap_grpc_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NetmapSnapshotRequest.ProtoReflect.Descriptor instead. 
+func (*NetmapSnapshotRequest) Descriptor() ([]byte, []int) { + return file_netmap_grpc_service_proto_rawDescGZIP(), []int{4} +} + +func (x *NetmapSnapshotRequest) GetBody() *NetmapSnapshotRequest_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *NetmapSnapshotRequest) GetMetaHeader() *session.RequestMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *NetmapSnapshotRequest) GetVerifyHeader() *session.RequestVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// Response with current netmap snapshot +type NetmapSnapshotResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of get netmap snapshot response message. + Body *NetmapSnapshotResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries response meta information. Header data is used only to regulate + // message transport and does not affect response execution. + MetaHeader *session.ResponseMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries response verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. 
+ VerifyHeader *session.ResponseVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *NetmapSnapshotResponse) Reset() { + *x = NetmapSnapshotResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_netmap_grpc_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NetmapSnapshotResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NetmapSnapshotResponse) ProtoMessage() {} + +func (x *NetmapSnapshotResponse) ProtoReflect() protoreflect.Message { + mi := &file_netmap_grpc_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NetmapSnapshotResponse.ProtoReflect.Descriptor instead. +func (*NetmapSnapshotResponse) Descriptor() ([]byte, []int) { + return file_netmap_grpc_service_proto_rawDescGZIP(), []int{5} +} + +func (x *NetmapSnapshotResponse) GetBody() *NetmapSnapshotResponse_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *NetmapSnapshotResponse) GetMetaHeader() *session.ResponseMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *NetmapSnapshotResponse) GetVerifyHeader() *session.ResponseVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// LocalNodeInfo request body is empty. 
+type LocalNodeInfoRequest_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *LocalNodeInfoRequest_Body) Reset() { + *x = LocalNodeInfoRequest_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_netmap_grpc_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocalNodeInfoRequest_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalNodeInfoRequest_Body) ProtoMessage() {} + +func (x *LocalNodeInfoRequest_Body) ProtoReflect() protoreflect.Message { + mi := &file_netmap_grpc_service_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocalNodeInfoRequest_Body.ProtoReflect.Descriptor instead. +func (*LocalNodeInfoRequest_Body) Descriptor() ([]byte, []int) { + return file_netmap_grpc_service_proto_rawDescGZIP(), []int{0, 0} +} + +// Local Node Info, including API Version in use. 
+type LocalNodeInfoResponse_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Latest NeoFS API version in use + Version *refs.Version `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // NodeInfo structure with recent information from node itself + NodeInfo *NodeInfo `protobuf:"bytes,2,opt,name=node_info,json=nodeInfo,proto3" json:"node_info,omitempty"` +} + +func (x *LocalNodeInfoResponse_Body) Reset() { + *x = LocalNodeInfoResponse_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_netmap_grpc_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocalNodeInfoResponse_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalNodeInfoResponse_Body) ProtoMessage() {} + +func (x *LocalNodeInfoResponse_Body) ProtoReflect() protoreflect.Message { + mi := &file_netmap_grpc_service_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocalNodeInfoResponse_Body.ProtoReflect.Descriptor instead. +func (*LocalNodeInfoResponse_Body) Descriptor() ([]byte, []int) { + return file_netmap_grpc_service_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *LocalNodeInfoResponse_Body) GetVersion() *refs.Version { + if x != nil { + return x.Version + } + return nil +} + +func (x *LocalNodeInfoResponse_Body) GetNodeInfo() *NodeInfo { + if x != nil { + return x.NodeInfo + } + return nil +} + +// NetworkInfo request body is empty. 
+type NetworkInfoRequest_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *NetworkInfoRequest_Body) Reset() { + *x = NetworkInfoRequest_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_netmap_grpc_service_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NetworkInfoRequest_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NetworkInfoRequest_Body) ProtoMessage() {} + +func (x *NetworkInfoRequest_Body) ProtoReflect() protoreflect.Message { + mi := &file_netmap_grpc_service_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NetworkInfoRequest_Body.ProtoReflect.Descriptor instead. +func (*NetworkInfoRequest_Body) Descriptor() ([]byte, []int) { + return file_netmap_grpc_service_proto_rawDescGZIP(), []int{2, 0} +} + +// Information about the network. +type NetworkInfoResponse_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // NetworkInfo structure with recent information. 
+ NetworkInfo *NetworkInfo `protobuf:"bytes,1,opt,name=network_info,json=networkInfo,proto3" json:"network_info,omitempty"` +} + +func (x *NetworkInfoResponse_Body) Reset() { + *x = NetworkInfoResponse_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_netmap_grpc_service_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NetworkInfoResponse_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NetworkInfoResponse_Body) ProtoMessage() {} + +func (x *NetworkInfoResponse_Body) ProtoReflect() protoreflect.Message { + mi := &file_netmap_grpc_service_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NetworkInfoResponse_Body.ProtoReflect.Descriptor instead. +func (*NetworkInfoResponse_Body) Descriptor() ([]byte, []int) { + return file_netmap_grpc_service_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *NetworkInfoResponse_Body) GetNetworkInfo() *NetworkInfo { + if x != nil { + return x.NetworkInfo + } + return nil +} + +// Get netmap snapshot request body. 
+type NetmapSnapshotRequest_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *NetmapSnapshotRequest_Body) Reset() { + *x = NetmapSnapshotRequest_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_netmap_grpc_service_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NetmapSnapshotRequest_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NetmapSnapshotRequest_Body) ProtoMessage() {} + +func (x *NetmapSnapshotRequest_Body) ProtoReflect() protoreflect.Message { + mi := &file_netmap_grpc_service_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NetmapSnapshotRequest_Body.ProtoReflect.Descriptor instead. +func (*NetmapSnapshotRequest_Body) Descriptor() ([]byte, []int) { + return file_netmap_grpc_service_proto_rawDescGZIP(), []int{4, 0} +} + +// Get netmap snapshot response body +type NetmapSnapshotResponse_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Structure of the requested network map. 
+ Netmap *Netmap `protobuf:"bytes,1,opt,name=netmap,proto3" json:"netmap,omitempty"` +} + +func (x *NetmapSnapshotResponse_Body) Reset() { + *x = NetmapSnapshotResponse_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_netmap_grpc_service_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NetmapSnapshotResponse_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NetmapSnapshotResponse_Body) ProtoMessage() {} + +func (x *NetmapSnapshotResponse_Body) ProtoReflect() protoreflect.Message { + mi := &file_netmap_grpc_service_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NetmapSnapshotResponse_Body.ProtoReflect.Descriptor instead. +func (*NetmapSnapshotResponse_Body) Descriptor() ([]byte, []int) { + return file_netmap_grpc_service_proto_rawDescGZIP(), []int{5, 0} +} + +func (x *NetmapSnapshotResponse_Body) GetNetmap() *Netmap { + if x != nil { + return x.Netmap + } + return nil +} + +var File_netmap_grpc_service_proto protoreflect.FileDescriptor + +var file_netmap_grpc_service_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x6e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x6e, 0x65, 0x6f, + 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x1a, 0x17, 0x6e, + 0x65, 0x74, 0x6d, 0x61, 0x70, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x72, 0x65, 0x66, 0x73, 0x2f, 0x67, 0x72, 0x70, + 0x63, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x73, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x79, 0x70, 0x65, + 
0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf9, 0x01, 0x0a, 0x14, 0x4c, 0x6f, 0x63, 0x61, + 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x3f, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, + 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6e, 0x65, 0x74, 0x6d, 0x61, + 0x70, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, + 0x79, 0x12, 0x45, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, + 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x6d, 0x65, + 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x51, 0x0a, 0x0d, 0x76, 0x65, 0x72, 0x69, + 0x66, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2c, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, + 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x06, 0x0a, 0x04, 0x42, + 0x6f, 0x64, 0x79, 0x22, 0xe9, 0x02, 0x0a, 0x15, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4e, 0x6f, 0x64, + 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, + 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6e, 0x65, + 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x2e, 0x4c, + 0x6f, 0x63, 0x61, 0x6c, 0x4e, 0x6f, 
0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, + 0x46, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x6d, 0x65, 0x74, + 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x52, 0x0a, 0x0d, 0x76, 0x65, 0x72, 0x69, 0x66, + 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, + 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, + 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x72, 0x0a, 0x04, 0x42, + 0x6f, 0x64, 0x79, 0x12, 0x31, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, + 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, + 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x2e, 0x4e, 0x6f, 0x64, + 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, + 0xf5, 0x01, 0x0a, 0x12, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x04, 0x62, 0x6f, 
0x64, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x6e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, + 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x65, 0x6f, + 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x51, 0x0a, 0x0d, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, + 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, + 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, + 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xbb, 0x02, 0x0a, 0x13, 0x4e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x3e, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, + 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6e, 0x65, 0x74, 0x6d, 0x61, 0x70, + 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, + 0x46, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x6d, 0x65, 0x74, + 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x52, 0x0a, 0x0d, 0x76, 0x65, 0x72, 0x69, 0x66, + 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, + 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, + 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x48, 0x0a, 0x04, 0x42, + 0x6f, 0x64, 0x79, 0x12, 0x40, 0x0a, 0x0c, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x69, + 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, + 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x2e, 0x4e, 0x65, 0x74, + 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0b, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xfb, 0x01, 0x0a, 0x15, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, + 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x40, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, + 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6e, 0x65, 0x74, 0x6d, 0x61, 0x70, + 0x2e, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, + 0x79, 0x12, 0x45, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 
0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, + 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x6d, 0x65, + 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x51, 0x0a, 0x0d, 0x76, 0x65, 0x72, 0x69, + 0x66, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2c, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, + 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x06, 0x0a, 0x04, 0x42, + 0x6f, 0x64, 0x79, 0x22, 0xb1, 0x02, 0x0a, 0x16, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, + 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6e, + 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x2e, + 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, + 0x79, 0x12, 0x46, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, + 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x6d, + 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x52, 0x0a, 0x0d, 0x76, 0x65, 0x72, + 0x69, 0x66, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 
0x28, 0x0b, + 0x32, 0x2d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x65, 0x72, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, + 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x38, 0x0a, + 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x06, 0x6e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x6e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x2e, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x52, + 0x06, 0x6e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x32, 0xb2, 0x02, 0x0a, 0x0d, 0x4e, 0x65, 0x74, 0x6d, + 0x61, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x60, 0x0a, 0x0d, 0x4c, 0x6f, 0x63, + 0x61, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x26, 0x2e, 0x6e, 0x65, 0x6f, + 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x2e, 0x4c, 0x6f, + 0x63, 0x61, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6e, + 0x65, 0x74, 0x6d, 0x61, 0x70, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x0b, 0x4e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x24, 0x2e, 0x6e, 0x65, 0x6f, + 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x2e, 0x4e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x25, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6e, 0x65, 0x74, + 0x6d, 0x61, 0x70, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, + 0x65, 0x73, 0x70, 0x6f, 
0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, 0x0e, 0x4e, 0x65, 0x74, 0x6d, 0x61, + 0x70, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x27, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, + 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x2e, 0x4e, 0x65, 0x74, + 0x6d, 0x61, 0x70, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6e, + 0x65, 0x74, 0x6d, 0x61, 0x70, 0x2e, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x56, 0x5a, 0x37, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6e, 0x73, 0x70, 0x63, 0x63, + 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x6e, 0x65, 0x6f, 0x66, 0x73, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x67, + 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x6e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x2f, 0x67, 0x72, 0x70, 0x63, + 0x3b, 0x6e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0xaa, 0x02, 0x1a, 0x4e, 0x65, 0x6f, 0x2e, 0x46, 0x69, + 0x6c, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x41, 0x50, 0x49, 0x2e, 0x4e, 0x65, + 0x74, 0x6d, 0x61, 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_netmap_grpc_service_proto_rawDescOnce sync.Once + file_netmap_grpc_service_proto_rawDescData = file_netmap_grpc_service_proto_rawDesc +) + +func file_netmap_grpc_service_proto_rawDescGZIP() []byte { + file_netmap_grpc_service_proto_rawDescOnce.Do(func() { + file_netmap_grpc_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_netmap_grpc_service_proto_rawDescData) + }) + return file_netmap_grpc_service_proto_rawDescData +} + +var file_netmap_grpc_service_proto_msgTypes = make([]protoimpl.MessageInfo, 12) +var file_netmap_grpc_service_proto_goTypes = []interface{}{ + (*LocalNodeInfoRequest)(nil), // 0: neo.fs.v2.netmap.LocalNodeInfoRequest + (*LocalNodeInfoResponse)(nil), // 1: neo.fs.v2.netmap.LocalNodeInfoResponse + 
(*NetworkInfoRequest)(nil), // 2: neo.fs.v2.netmap.NetworkInfoRequest + (*NetworkInfoResponse)(nil), // 3: neo.fs.v2.netmap.NetworkInfoResponse + (*NetmapSnapshotRequest)(nil), // 4: neo.fs.v2.netmap.NetmapSnapshotRequest + (*NetmapSnapshotResponse)(nil), // 5: neo.fs.v2.netmap.NetmapSnapshotResponse + (*LocalNodeInfoRequest_Body)(nil), // 6: neo.fs.v2.netmap.LocalNodeInfoRequest.Body + (*LocalNodeInfoResponse_Body)(nil), // 7: neo.fs.v2.netmap.LocalNodeInfoResponse.Body + (*NetworkInfoRequest_Body)(nil), // 8: neo.fs.v2.netmap.NetworkInfoRequest.Body + (*NetworkInfoResponse_Body)(nil), // 9: neo.fs.v2.netmap.NetworkInfoResponse.Body + (*NetmapSnapshotRequest_Body)(nil), // 10: neo.fs.v2.netmap.NetmapSnapshotRequest.Body + (*NetmapSnapshotResponse_Body)(nil), // 11: neo.fs.v2.netmap.NetmapSnapshotResponse.Body + (*session.RequestMetaHeader)(nil), // 12: neo.fs.v2.session.RequestMetaHeader + (*session.RequestVerificationHeader)(nil), // 13: neo.fs.v2.session.RequestVerificationHeader + (*session.ResponseMetaHeader)(nil), // 14: neo.fs.v2.session.ResponseMetaHeader + (*session.ResponseVerificationHeader)(nil), // 15: neo.fs.v2.session.ResponseVerificationHeader + (*refs.Version)(nil), // 16: neo.fs.v2.refs.Version + (*NodeInfo)(nil), // 17: neo.fs.v2.netmap.NodeInfo + (*NetworkInfo)(nil), // 18: neo.fs.v2.netmap.NetworkInfo + (*Netmap)(nil), // 19: neo.fs.v2.netmap.Netmap +} +var file_netmap_grpc_service_proto_depIdxs = []int32{ + 6, // 0: neo.fs.v2.netmap.LocalNodeInfoRequest.body:type_name -> neo.fs.v2.netmap.LocalNodeInfoRequest.Body + 12, // 1: neo.fs.v2.netmap.LocalNodeInfoRequest.meta_header:type_name -> neo.fs.v2.session.RequestMetaHeader + 13, // 2: neo.fs.v2.netmap.LocalNodeInfoRequest.verify_header:type_name -> neo.fs.v2.session.RequestVerificationHeader + 7, // 3: neo.fs.v2.netmap.LocalNodeInfoResponse.body:type_name -> neo.fs.v2.netmap.LocalNodeInfoResponse.Body + 14, // 4: neo.fs.v2.netmap.LocalNodeInfoResponse.meta_header:type_name -> 
neo.fs.v2.session.ResponseMetaHeader + 15, // 5: neo.fs.v2.netmap.LocalNodeInfoResponse.verify_header:type_name -> neo.fs.v2.session.ResponseVerificationHeader + 8, // 6: neo.fs.v2.netmap.NetworkInfoRequest.body:type_name -> neo.fs.v2.netmap.NetworkInfoRequest.Body + 12, // 7: neo.fs.v2.netmap.NetworkInfoRequest.meta_header:type_name -> neo.fs.v2.session.RequestMetaHeader + 13, // 8: neo.fs.v2.netmap.NetworkInfoRequest.verify_header:type_name -> neo.fs.v2.session.RequestVerificationHeader + 9, // 9: neo.fs.v2.netmap.NetworkInfoResponse.body:type_name -> neo.fs.v2.netmap.NetworkInfoResponse.Body + 14, // 10: neo.fs.v2.netmap.NetworkInfoResponse.meta_header:type_name -> neo.fs.v2.session.ResponseMetaHeader + 15, // 11: neo.fs.v2.netmap.NetworkInfoResponse.verify_header:type_name -> neo.fs.v2.session.ResponseVerificationHeader + 10, // 12: neo.fs.v2.netmap.NetmapSnapshotRequest.body:type_name -> neo.fs.v2.netmap.NetmapSnapshotRequest.Body + 12, // 13: neo.fs.v2.netmap.NetmapSnapshotRequest.meta_header:type_name -> neo.fs.v2.session.RequestMetaHeader + 13, // 14: neo.fs.v2.netmap.NetmapSnapshotRequest.verify_header:type_name -> neo.fs.v2.session.RequestVerificationHeader + 11, // 15: neo.fs.v2.netmap.NetmapSnapshotResponse.body:type_name -> neo.fs.v2.netmap.NetmapSnapshotResponse.Body + 14, // 16: neo.fs.v2.netmap.NetmapSnapshotResponse.meta_header:type_name -> neo.fs.v2.session.ResponseMetaHeader + 15, // 17: neo.fs.v2.netmap.NetmapSnapshotResponse.verify_header:type_name -> neo.fs.v2.session.ResponseVerificationHeader + 16, // 18: neo.fs.v2.netmap.LocalNodeInfoResponse.Body.version:type_name -> neo.fs.v2.refs.Version + 17, // 19: neo.fs.v2.netmap.LocalNodeInfoResponse.Body.node_info:type_name -> neo.fs.v2.netmap.NodeInfo + 18, // 20: neo.fs.v2.netmap.NetworkInfoResponse.Body.network_info:type_name -> neo.fs.v2.netmap.NetworkInfo + 19, // 21: neo.fs.v2.netmap.NetmapSnapshotResponse.Body.netmap:type_name -> neo.fs.v2.netmap.Netmap + 0, // 22: 
neo.fs.v2.netmap.NetmapService.LocalNodeInfo:input_type -> neo.fs.v2.netmap.LocalNodeInfoRequest + 2, // 23: neo.fs.v2.netmap.NetmapService.NetworkInfo:input_type -> neo.fs.v2.netmap.NetworkInfoRequest + 4, // 24: neo.fs.v2.netmap.NetmapService.NetmapSnapshot:input_type -> neo.fs.v2.netmap.NetmapSnapshotRequest + 1, // 25: neo.fs.v2.netmap.NetmapService.LocalNodeInfo:output_type -> neo.fs.v2.netmap.LocalNodeInfoResponse + 3, // 26: neo.fs.v2.netmap.NetmapService.NetworkInfo:output_type -> neo.fs.v2.netmap.NetworkInfoResponse + 5, // 27: neo.fs.v2.netmap.NetmapService.NetmapSnapshot:output_type -> neo.fs.v2.netmap.NetmapSnapshotResponse + 25, // [25:28] is the sub-list for method output_type + 22, // [22:25] is the sub-list for method input_type + 22, // [22:22] is the sub-list for extension type_name + 22, // [22:22] is the sub-list for extension extendee + 0, // [0:22] is the sub-list for field type_name +} + +func init() { file_netmap_grpc_service_proto_init() } +func file_netmap_grpc_service_proto_init() { + if File_netmap_grpc_service_proto != nil { + return + } + file_netmap_grpc_types_proto_init() + if !protoimpl.UnsafeEnabled { + file_netmap_grpc_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocalNodeInfoRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_netmap_grpc_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocalNodeInfoResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_netmap_grpc_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NetworkInfoRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_netmap_grpc_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NetworkInfoResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_netmap_grpc_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NetmapSnapshotRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_netmap_grpc_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NetmapSnapshotResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_netmap_grpc_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocalNodeInfoRequest_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_netmap_grpc_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocalNodeInfoResponse_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_netmap_grpc_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NetworkInfoRequest_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_netmap_grpc_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NetworkInfoResponse_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_netmap_grpc_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*NetmapSnapshotRequest_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_netmap_grpc_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NetmapSnapshotResponse_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_netmap_grpc_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 12, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_netmap_grpc_service_proto_goTypes, + DependencyIndexes: file_netmap_grpc_service_proto_depIdxs, + MessageInfos: file_netmap_grpc_service_proto_msgTypes, + }.Build() + File_netmap_grpc_service_proto = out.File + file_netmap_grpc_service_proto_rawDesc = nil + file_netmap_grpc_service_proto_goTypes = nil + file_netmap_grpc_service_proto_depIdxs = nil +} diff --git a/api/netmap/service_grpc.pb.go b/api/netmap/service_grpc.pb.go new file mode 100644 index 000000000..69c702e45 --- /dev/null +++ b/api/netmap/service_grpc.pb.go @@ -0,0 +1,226 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.25.1 +// source: netmap/grpc/service.proto + +package netmap + +import ( + "context" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. 
+const _ = grpc.SupportPackageIsVersion7 + +const ( + NetmapService_LocalNodeInfo_FullMethodName = "/neo.fs.v2.netmap.NetmapService/LocalNodeInfo" + NetmapService_NetworkInfo_FullMethodName = "/neo.fs.v2.netmap.NetmapService/NetworkInfo" + NetmapService_NetmapSnapshot_FullMethodName = "/neo.fs.v2.netmap.NetmapService/NetmapSnapshot" +) + +// NetmapServiceClient is the client API for NetmapService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type NetmapServiceClient interface { + // Get NodeInfo structure from the particular node directly. + // Node information can be taken from `Netmap` smart contract. In some cases, though, + // one may want to get recent information directly or to talk to the node not yet + // present in the `Network Map` to find out what API version can be used for + // further communication. This can be also used to check if a node is up and running. + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): + // information about the server has been successfully read; + // - Common failures (SECTION_FAILURE_COMMON). + LocalNodeInfo(ctx context.Context, in *LocalNodeInfoRequest, opts ...grpc.CallOption) (*LocalNodeInfoResponse, error) + // Read recent information about the NeoFS network. + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): + // information about the current network state has been successfully read; + // - Common failures (SECTION_FAILURE_COMMON). + NetworkInfo(ctx context.Context, in *NetworkInfoRequest, opts ...grpc.CallOption) (*NetworkInfoResponse, error) + // Returns network map snapshot of the current NeoFS epoch. + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): + // information about the current network map has been successfully read; + // - Common failures (SECTION_FAILURE_COMMON). 
+ NetmapSnapshot(ctx context.Context, in *NetmapSnapshotRequest, opts ...grpc.CallOption) (*NetmapSnapshotResponse, error) +} + +type netmapServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewNetmapServiceClient(cc grpc.ClientConnInterface) NetmapServiceClient { + return &netmapServiceClient{cc} +} + +func (c *netmapServiceClient) LocalNodeInfo(ctx context.Context, in *LocalNodeInfoRequest, opts ...grpc.CallOption) (*LocalNodeInfoResponse, error) { + out := new(LocalNodeInfoResponse) + err := c.cc.Invoke(ctx, NetmapService_LocalNodeInfo_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *netmapServiceClient) NetworkInfo(ctx context.Context, in *NetworkInfoRequest, opts ...grpc.CallOption) (*NetworkInfoResponse, error) { + out := new(NetworkInfoResponse) + err := c.cc.Invoke(ctx, NetmapService_NetworkInfo_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *netmapServiceClient) NetmapSnapshot(ctx context.Context, in *NetmapSnapshotRequest, opts ...grpc.CallOption) (*NetmapSnapshotResponse, error) { + out := new(NetmapSnapshotResponse) + err := c.cc.Invoke(ctx, NetmapService_NetmapSnapshot_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// NetmapServiceServer is the server API for NetmapService service. +// All implementations should embed UnimplementedNetmapServiceServer +// for forward compatibility +type NetmapServiceServer interface { + // Get NodeInfo structure from the particular node directly. + // Node information can be taken from `Netmap` smart contract. In some cases, though, + // one may want to get recent information directly or to talk to the node not yet + // present in the `Network Map` to find out what API version can be used for + // further communication. This can be also used to check if a node is up and running. 
+ // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): + // information about the server has been successfully read; + // - Common failures (SECTION_FAILURE_COMMON). + LocalNodeInfo(context.Context, *LocalNodeInfoRequest) (*LocalNodeInfoResponse, error) + // Read recent information about the NeoFS network. + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): + // information about the current network state has been successfully read; + // - Common failures (SECTION_FAILURE_COMMON). + NetworkInfo(context.Context, *NetworkInfoRequest) (*NetworkInfoResponse, error) + // Returns network map snapshot of the current NeoFS epoch. + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): + // information about the current network map has been successfully read; + // - Common failures (SECTION_FAILURE_COMMON). + NetmapSnapshot(context.Context, *NetmapSnapshotRequest) (*NetmapSnapshotResponse, error) +} + +// UnimplementedNetmapServiceServer should be embedded to have forward compatible implementations. +type UnimplementedNetmapServiceServer struct { +} + +func (UnimplementedNetmapServiceServer) LocalNodeInfo(context.Context, *LocalNodeInfoRequest) (*LocalNodeInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LocalNodeInfo not implemented") +} +func (UnimplementedNetmapServiceServer) NetworkInfo(context.Context, *NetworkInfoRequest) (*NetworkInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NetworkInfo not implemented") +} +func (UnimplementedNetmapServiceServer) NetmapSnapshot(context.Context, *NetmapSnapshotRequest) (*NetmapSnapshotResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NetmapSnapshot not implemented") +} + +// UnsafeNetmapServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to NetmapServiceServer will +// result in compilation errors. 
+type UnsafeNetmapServiceServer interface { + mustEmbedUnimplementedNetmapServiceServer() +} + +func RegisterNetmapServiceServer(s grpc.ServiceRegistrar, srv NetmapServiceServer) { + s.RegisterService(&NetmapService_ServiceDesc, srv) +} + +func _NetmapService_LocalNodeInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LocalNodeInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NetmapServiceServer).LocalNodeInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: NetmapService_LocalNodeInfo_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NetmapServiceServer).LocalNodeInfo(ctx, req.(*LocalNodeInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NetmapService_NetworkInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NetworkInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NetmapServiceServer).NetworkInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: NetmapService_NetworkInfo_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NetmapServiceServer).NetworkInfo(ctx, req.(*NetworkInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NetmapService_NetmapSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NetmapSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NetmapServiceServer).NetmapSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
NetmapService_NetmapSnapshot_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NetmapServiceServer).NetmapSnapshot(ctx, req.(*NetmapSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// NetmapService_ServiceDesc is the grpc.ServiceDesc for NetmapService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var NetmapService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "neo.fs.v2.netmap.NetmapService", + HandlerType: (*NetmapServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "LocalNodeInfo", + Handler: _NetmapService_LocalNodeInfo_Handler, + }, + { + MethodName: "NetworkInfo", + Handler: _NetmapService_NetworkInfo_Handler, + }, + { + MethodName: "NetmapSnapshot", + Handler: _NetmapService_NetmapSnapshot_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "netmap/grpc/service.proto", +} diff --git a/api/netmap/types.pb.go b/api/netmap/types.pb.go new file mode 100644 index 000000000..aea6e6326 --- /dev/null +++ b/api/netmap/types.pb.go @@ -0,0 +1,1367 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.32.0 +// protoc v4.25.1 +// source: netmap/grpc/types.proto + +package netmap + +import ( + reflect "reflect" + sync "sync" + + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Operations on filters +type Operation int32 + +const ( + // No Operation defined + Operation_OPERATION_UNSPECIFIED Operation = 0 + // Equal + Operation_EQ Operation = 1 + // Not Equal + Operation_NE Operation = 2 + // Greater then + Operation_GT Operation = 3 + // Greater or equal + Operation_GE Operation = 4 + // Less then + Operation_LT Operation = 5 + // Less or equal + Operation_LE Operation = 6 + // Logical OR + Operation_OR Operation = 7 + // Logical AND + Operation_AND Operation = 8 +) + +// Enum value maps for Operation. +var ( + Operation_name = map[int32]string{ + 0: "OPERATION_UNSPECIFIED", + 1: "EQ", + 2: "NE", + 3: "GT", + 4: "GE", + 5: "LT", + 6: "LE", + 7: "OR", + 8: "AND", + } + Operation_value = map[string]int32{ + "OPERATION_UNSPECIFIED": 0, + "EQ": 1, + "NE": 2, + "GT": 3, + "GE": 4, + "LT": 5, + "LE": 6, + "OR": 7, + "AND": 8, + } +) + +func (x Operation) Enum() *Operation { + p := new(Operation) + *p = x + return p +} + +func (x Operation) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Operation) Descriptor() protoreflect.EnumDescriptor { + return file_netmap_grpc_types_proto_enumTypes[0].Descriptor() +} + +func (Operation) Type() protoreflect.EnumType { + return &file_netmap_grpc_types_proto_enumTypes[0] +} + +func (x Operation) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Operation.Descriptor instead. +func (Operation) EnumDescriptor() ([]byte, []int) { + return file_netmap_grpc_types_proto_rawDescGZIP(), []int{0} +} + +// Selector modifier shows how the node set will be formed. By default selector +// just groups nodes into a bucket by attribute, selecting nodes only by their +// hash distance. +type Clause int32 + +const ( + // No modifier defined. 
Nodes will be selected from the bucket randomly + Clause_CLAUSE_UNSPECIFIED Clause = 0 + // SAME will select only nodes having the same value of bucket attribute + Clause_SAME Clause = 1 + // DISTINCT will select nodes having different values of bucket attribute + Clause_DISTINCT Clause = 2 +) + +// Enum value maps for Clause. +var ( + Clause_name = map[int32]string{ + 0: "CLAUSE_UNSPECIFIED", + 1: "SAME", + 2: "DISTINCT", + } + Clause_value = map[string]int32{ + "CLAUSE_UNSPECIFIED": 0, + "SAME": 1, + "DISTINCT": 2, + } +) + +func (x Clause) Enum() *Clause { + p := new(Clause) + *p = x + return p +} + +func (x Clause) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Clause) Descriptor() protoreflect.EnumDescriptor { + return file_netmap_grpc_types_proto_enumTypes[1].Descriptor() +} + +func (Clause) Type() protoreflect.EnumType { + return &file_netmap_grpc_types_proto_enumTypes[1] +} + +func (x Clause) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Clause.Descriptor instead. +func (Clause) EnumDescriptor() ([]byte, []int) { + return file_netmap_grpc_types_proto_rawDescGZIP(), []int{1} +} + +// Represents the enumeration of various states of the NeoFS node. +type NodeInfo_State int32 + +const ( + // Unknown state + NodeInfo_UNSPECIFIED NodeInfo_State = 0 + // Active state in the network + NodeInfo_ONLINE NodeInfo_State = 1 + // Network unavailable state + NodeInfo_OFFLINE NodeInfo_State = 2 + // Maintenance state + NodeInfo_MAINTENANCE NodeInfo_State = 3 +) + +// Enum value maps for NodeInfo_State. 
+var ( + NodeInfo_State_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "ONLINE", + 2: "OFFLINE", + 3: "MAINTENANCE", + } + NodeInfo_State_value = map[string]int32{ + "UNSPECIFIED": 0, + "ONLINE": 1, + "OFFLINE": 2, + "MAINTENANCE": 3, + } +) + +func (x NodeInfo_State) Enum() *NodeInfo_State { + p := new(NodeInfo_State) + *p = x + return p +} + +func (x NodeInfo_State) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (NodeInfo_State) Descriptor() protoreflect.EnumDescriptor { + return file_netmap_grpc_types_proto_enumTypes[2].Descriptor() +} + +func (NodeInfo_State) Type() protoreflect.EnumType { + return &file_netmap_grpc_types_proto_enumTypes[2] +} + +func (x NodeInfo_State) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use NodeInfo_State.Descriptor instead. +func (NodeInfo_State) EnumDescriptor() ([]byte, []int) { + return file_netmap_grpc_types_proto_rawDescGZIP(), []int{4, 0} +} + +// This filter will return the subset of nodes from `NetworkMap` or another filter's +// results that will satisfy filter's conditions. +type Filter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name of the filter or a reference to a named filter. '*' means + // application to the whole unfiltered NetworkMap. At top level it's used as a + // filter name. At lower levels it's considered to be a reference to another + // named filter + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Key to filter + Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + // Filtering operation + Op Operation `protobuf:"varint,3,opt,name=op,proto3,enum=neo.fs.v2.netmap.Operation" json:"op,omitempty"` + // Value to match + Value string `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` + // List of inner filters. 
Top level operation will be applied to the whole + // list. + Filters []*Filter `protobuf:"bytes,5,rep,name=filters,proto3" json:"filters,omitempty"` +} + +func (x *Filter) Reset() { + *x = Filter{} + if protoimpl.UnsafeEnabled { + mi := &file_netmap_grpc_types_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Filter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Filter) ProtoMessage() {} + +func (x *Filter) ProtoReflect() protoreflect.Message { + mi := &file_netmap_grpc_types_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Filter.ProtoReflect.Descriptor instead. +func (*Filter) Descriptor() ([]byte, []int) { + return file_netmap_grpc_types_proto_rawDescGZIP(), []int{0} +} + +func (x *Filter) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Filter) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *Filter) GetOp() Operation { + if x != nil { + return x.Op + } + return Operation_OPERATION_UNSPECIFIED +} + +func (x *Filter) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +func (x *Filter) GetFilters() []*Filter { + if x != nil { + return x.Filters + } + return nil +} + +// Selector chooses a number of nodes from the bucket taking the nearest nodes +// to the provided `ContainerID` by hash distance. 
+type Selector struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Selector name to reference in object placement section + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // How many nodes to select from the bucket + Count uint32 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"` + // Selector modifier showing how to form a bucket + Clause Clause `protobuf:"varint,3,opt,name=clause,proto3,enum=neo.fs.v2.netmap.Clause" json:"clause,omitempty"` + // Bucket attribute to select from + Attribute string `protobuf:"bytes,4,opt,name=attribute,proto3" json:"attribute,omitempty"` + // Filter reference to select from + Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"` +} + +func (x *Selector) Reset() { + *x = Selector{} + if protoimpl.UnsafeEnabled { + mi := &file_netmap_grpc_types_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Selector) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Selector) ProtoMessage() {} + +func (x *Selector) ProtoReflect() protoreflect.Message { + mi := &file_netmap_grpc_types_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Selector.ProtoReflect.Descriptor instead. 
+func (*Selector) Descriptor() ([]byte, []int) { + return file_netmap_grpc_types_proto_rawDescGZIP(), []int{1} +} + +func (x *Selector) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Selector) GetCount() uint32 { + if x != nil { + return x.Count + } + return 0 +} + +func (x *Selector) GetClause() Clause { + if x != nil { + return x.Clause + } + return Clause_CLAUSE_UNSPECIFIED +} + +func (x *Selector) GetAttribute() string { + if x != nil { + return x.Attribute + } + return "" +} + +func (x *Selector) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +// Number of object replicas in a set of nodes from the defined selector. If no +// selector set, the root bucket containing all possible nodes will be used by +// default. +type Replica struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // How many object replicas to put + Count uint32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + // Named selector bucket to put replicas + Selector string `protobuf:"bytes,2,opt,name=selector,proto3" json:"selector,omitempty"` +} + +func (x *Replica) Reset() { + *x = Replica{} + if protoimpl.UnsafeEnabled { + mi := &file_netmap_grpc_types_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Replica) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Replica) ProtoMessage() {} + +func (x *Replica) ProtoReflect() protoreflect.Message { + mi := &file_netmap_grpc_types_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Replica.ProtoReflect.Descriptor instead. 
+func (*Replica) Descriptor() ([]byte, []int) { + return file_netmap_grpc_types_proto_rawDescGZIP(), []int{2} +} + +func (x *Replica) GetCount() uint32 { + if x != nil { + return x.Count + } + return 0 +} + +func (x *Replica) GetSelector() string { + if x != nil { + return x.Selector + } + return "" +} + +// Set of rules to select a subset of nodes from `NetworkMap` able to store +// container's objects. The format is simple enough to transpile from different +// storage policy definition languages. +type PlacementPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Rules to set number of object replicas and place each one into a named + // bucket + Replicas []*Replica `protobuf:"bytes,1,rep,name=replicas,proto3" json:"replicas,omitempty"` + // Container backup factor controls how deep NeoFS will search for nodes + // alternatives to include into container's nodes subset + ContainerBackupFactor uint32 `protobuf:"varint,2,opt,name=container_backup_factor,json=containerBackupFactor,proto3" json:"container_backup_factor,omitempty"` + // Set of Selectors to form the container's nodes subset + Selectors []*Selector `protobuf:"bytes,3,rep,name=selectors,proto3" json:"selectors,omitempty"` + // List of named filters to reference in selectors + Filters []*Filter `protobuf:"bytes,4,rep,name=filters,proto3" json:"filters,omitempty"` + // DEPRECATED. Was used for subnetwork ID to select nodes from, currently + // ignored. + // + // Deprecated: Marked as deprecated in netmap/grpc/types.proto. 
+ SubnetId *refs.SubnetID `protobuf:"bytes,5,opt,name=subnet_id,json=subnetId,proto3" json:"subnet_id,omitempty"` +} + +func (x *PlacementPolicy) Reset() { + *x = PlacementPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_netmap_grpc_types_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PlacementPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PlacementPolicy) ProtoMessage() {} + +func (x *PlacementPolicy) ProtoReflect() protoreflect.Message { + mi := &file_netmap_grpc_types_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PlacementPolicy.ProtoReflect.Descriptor instead. +func (*PlacementPolicy) Descriptor() ([]byte, []int) { + return file_netmap_grpc_types_proto_rawDescGZIP(), []int{3} +} + +func (x *PlacementPolicy) GetReplicas() []*Replica { + if x != nil { + return x.Replicas + } + return nil +} + +func (x *PlacementPolicy) GetContainerBackupFactor() uint32 { + if x != nil { + return x.ContainerBackupFactor + } + return 0 +} + +func (x *PlacementPolicy) GetSelectors() []*Selector { + if x != nil { + return x.Selectors + } + return nil +} + +func (x *PlacementPolicy) GetFilters() []*Filter { + if x != nil { + return x.Filters + } + return nil +} + +// Deprecated: Marked as deprecated in netmap/grpc/types.proto. 
+func (x *PlacementPolicy) GetSubnetId() *refs.SubnetID { + if x != nil { + return x.SubnetId + } + return nil +} + +// NeoFS node description +type NodeInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Public key of the NeoFS node in a binary format + PublicKey []byte `protobuf:"bytes,1,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` + // Ways to connect to a node + Addresses []string `protobuf:"bytes,2,rep,name=addresses,proto3" json:"addresses,omitempty"` + // Carries list of the NeoFS node attributes in a key-value form. Key name + // must be a node-unique valid UTF-8 string. Value can't be empty. NodeInfo + // structures with duplicated attribute names or attributes with empty values + // will be considered invalid. + Attributes []*NodeInfo_Attribute `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"` + // Carries state of the NeoFS node + State NodeInfo_State `protobuf:"varint,4,opt,name=state,proto3,enum=neo.fs.v2.netmap.NodeInfo_State" json:"state,omitempty"` +} + +func (x *NodeInfo) Reset() { + *x = NodeInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_netmap_grpc_types_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NodeInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NodeInfo) ProtoMessage() {} + +func (x *NodeInfo) ProtoReflect() protoreflect.Message { + mi := &file_netmap_grpc_types_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NodeInfo.ProtoReflect.Descriptor instead. 
+func (*NodeInfo) Descriptor() ([]byte, []int) { + return file_netmap_grpc_types_proto_rawDescGZIP(), []int{4} +} + +func (x *NodeInfo) GetPublicKey() []byte { + if x != nil { + return x.PublicKey + } + return nil +} + +func (x *NodeInfo) GetAddresses() []string { + if x != nil { + return x.Addresses + } + return nil +} + +func (x *NodeInfo) GetAttributes() []*NodeInfo_Attribute { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *NodeInfo) GetState() NodeInfo_State { + if x != nil { + return x.State + } + return NodeInfo_UNSPECIFIED +} + +// Network map structure +type Netmap struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Network map revision number. + Epoch uint64 `protobuf:"varint,1,opt,name=epoch,proto3" json:"epoch,omitempty"` + // Nodes presented in network. + Nodes []*NodeInfo `protobuf:"bytes,2,rep,name=nodes,proto3" json:"nodes,omitempty"` +} + +func (x *Netmap) Reset() { + *x = Netmap{} + if protoimpl.UnsafeEnabled { + mi := &file_netmap_grpc_types_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Netmap) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Netmap) ProtoMessage() {} + +func (x *Netmap) ProtoReflect() protoreflect.Message { + mi := &file_netmap_grpc_types_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Netmap.ProtoReflect.Descriptor instead. 
+func (*Netmap) Descriptor() ([]byte, []int) { + return file_netmap_grpc_types_proto_rawDescGZIP(), []int{5} +} + +func (x *Netmap) GetEpoch() uint64 { + if x != nil { + return x.Epoch + } + return 0 +} + +func (x *Netmap) GetNodes() []*NodeInfo { + if x != nil { + return x.Nodes + } + return nil +} + +// NeoFS network configuration +type NetworkConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // List of parameter values + Parameters []*NetworkConfig_Parameter `protobuf:"bytes,1,rep,name=parameters,proto3" json:"parameters,omitempty"` +} + +func (x *NetworkConfig) Reset() { + *x = NetworkConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_netmap_grpc_types_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NetworkConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NetworkConfig) ProtoMessage() {} + +func (x *NetworkConfig) ProtoReflect() protoreflect.Message { + mi := &file_netmap_grpc_types_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NetworkConfig.ProtoReflect.Descriptor instead. 
+func (*NetworkConfig) Descriptor() ([]byte, []int) { + return file_netmap_grpc_types_proto_rawDescGZIP(), []int{6} +} + +func (x *NetworkConfig) GetParameters() []*NetworkConfig_Parameter { + if x != nil { + return x.Parameters + } + return nil +} + +// Information about NeoFS network +type NetworkInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Number of the current epoch in the NeoFS network + CurrentEpoch uint64 `protobuf:"varint,1,opt,name=current_epoch,json=currentEpoch,proto3" json:"current_epoch,omitempty"` + // Magic number of the sidechain of the NeoFS network + MagicNumber uint64 `protobuf:"varint,2,opt,name=magic_number,json=magicNumber,proto3" json:"magic_number,omitempty"` + // MillisecondsPerBlock network parameter of the sidechain of the NeoFS network + MsPerBlock int64 `protobuf:"varint,3,opt,name=ms_per_block,json=msPerBlock,proto3" json:"ms_per_block,omitempty"` + // NeoFS network configuration + NetworkConfig *NetworkConfig `protobuf:"bytes,4,opt,name=network_config,json=networkConfig,proto3" json:"network_config,omitempty"` +} + +func (x *NetworkInfo) Reset() { + *x = NetworkInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_netmap_grpc_types_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NetworkInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NetworkInfo) ProtoMessage() {} + +func (x *NetworkInfo) ProtoReflect() protoreflect.Message { + mi := &file_netmap_grpc_types_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NetworkInfo.ProtoReflect.Descriptor instead. 
+func (*NetworkInfo) Descriptor() ([]byte, []int) { + return file_netmap_grpc_types_proto_rawDescGZIP(), []int{7} +} + +func (x *NetworkInfo) GetCurrentEpoch() uint64 { + if x != nil { + return x.CurrentEpoch + } + return 0 +} + +func (x *NetworkInfo) GetMagicNumber() uint64 { + if x != nil { + return x.MagicNumber + } + return 0 +} + +func (x *NetworkInfo) GetMsPerBlock() int64 { + if x != nil { + return x.MsPerBlock + } + return 0 +} + +func (x *NetworkInfo) GetNetworkConfig() *NetworkConfig { + if x != nil { + return x.NetworkConfig + } + return nil +} + +// Administrator-defined Attributes of the NeoFS Storage Node. +// +// `Attribute` is a Key-Value metadata pair. Key name must be a valid UTF-8 +// string. Value can't be empty. +// +// Attributes can be constructed into a chain of attributes: any attribute can +// have a parent attribute and a child attribute (except the first and the last +// one). A string representation of the chain of attributes in NeoFS Storage +// Node configuration uses ":" and "/" symbols, e.g.: +// +// `NEOFS_NODE_ATTRIBUTE_1=key1:val1/key2:val2` +// +// Therefore the string attribute representation in the Node configuration must +// use "\:", "\/" and "\\" escaped symbols if any of them appears in an attribute's +// key or value. +// +// Node's attributes are mostly used during Storage Policy evaluation to +// calculate object's placement and find a set of nodes satisfying policy +// requirements. There are some "well-known" node attributes common to all the +// Storage Nodes in the network and used implicitly with default values if not +// explicitly set: +// +// - Capacity \ +// Total available disk space in Gigabytes. +// - Price \ +// Price in GAS tokens for storing one GB of data during one Epoch. In node +// attributes it's a string presenting floating point number with comma or +// point delimiter for decimal part. 
In the Network Map it will be saved as +// 64-bit unsigned integer representing number of minimal token fractions. +// - __NEOFS__SUBNET_%s \ +// DEPRECATED. Defined if the node is included in the `%s` subnetwork +// or not. Currently ignored. +// - UN-LOCODE \ +// Node's geographic location in +// [UN/LOCODE](https://www.unece.org/cefact/codesfortrade/codes_index.html) +// format approximated to the nearest point defined in the standard. +// - CountryCode \ +// Country code in +// [ISO 3166-1_alpha-2](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2) +// format. Calculated automatically from `UN-LOCODE` attribute. +// - Country \ +// Country short name in English, as defined in +// [ISO-3166](https://www.iso.org/obp/ui/#search). Calculated automatically +// from `UN-LOCODE` attribute. +// - Location \ +// Place names are given, whenever possible, in their national language +// versions as expressed in the Roman alphabet using the 26 characters of +// the character set adopted for international trade data interchange, +// written without diacritics . Calculated automatically from `UN-LOCODE` +// attribute. +// - SubDivCode \ +// Country's administrative subdivision where node is located. Calculated +// automatically from `UN-LOCODE` attribute based on `SubDiv` field. +// Presented in [ISO 3166-2](https://en.wikipedia.org/wiki/ISO_3166-2) +// format. +// - SubDiv \ +// Country's administrative subdivision name, as defined in +// [ISO 3166-2](https://en.wikipedia.org/wiki/ISO_3166-2). Calculated +// automatically from `UN-LOCODE` attribute. +// - Continent \ +// Node's continent name according to the [Seven-Continent model] +// (https://en.wikipedia.org/wiki/Continent#Number). Calculated +// automatically from `UN-LOCODE` attribute. +// - ExternalAddr +// Node's preferred way for communications with external clients. +// Clients SHOULD use these addresses if possible. +// Must contain a comma-separated list of multi-addresses. 
+// - Version +// Node implementation's version in a free string form. +// - VerifiedNodesDomain +// Confirmation of admission to a group of storage nodes. +// The value is the domain name registered in the NeoFS NNS. If attribute +// is specified, the storage node requesting entry into the NeoFS network +// map with this attribute must be included in the access list located on +// the specified domain. The access list is represented by a set of TXT +// records: Neo addresses resolved from public keys. To be admitted to the +// network, Neo address of the node's public key declared in 'public_key' +// field must be present in domain records. Otherwise, registration will be +// denied. +// Value must be a valid NeoFS NNS domain name. Note that if this attribute +// is absent, this check is not carried out. +// +// For detailed description of each well-known attribute please see the +// corresponding section in NeoFS Technical Specification. +type NodeInfo_Attribute struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Key of the node attribute + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // Value of the node attribute + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + // Parent keys, if any. For example for `City` it could be `Region` and + // `Country`. 
+ Parents []string `protobuf:"bytes,3,rep,name=parents,proto3" json:"parents,omitempty"` +} + +func (x *NodeInfo_Attribute) Reset() { + *x = NodeInfo_Attribute{} + if protoimpl.UnsafeEnabled { + mi := &file_netmap_grpc_types_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NodeInfo_Attribute) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NodeInfo_Attribute) ProtoMessage() {} + +func (x *NodeInfo_Attribute) ProtoReflect() protoreflect.Message { + mi := &file_netmap_grpc_types_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NodeInfo_Attribute.ProtoReflect.Descriptor instead. +func (*NodeInfo_Attribute) Descriptor() ([]byte, []int) { + return file_netmap_grpc_types_proto_rawDescGZIP(), []int{4, 0} +} + +func (x *NodeInfo_Attribute) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *NodeInfo_Attribute) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +func (x *NodeInfo_Attribute) GetParents() []string { + if x != nil { + return x.Parents + } + return nil +} + +// Single configuration parameter. Key MUST be network-unique. +// +// System parameters: +// - **AuditFee** \ +// Fee paid by the storage group owner to the Inner Ring member. +// Value: little-endian integer. Default: 0. +// - **BasicIncomeRate** \ +// Cost of storing one gigabyte of data for a period of one epoch. Paid by +// container owner to container nodes. +// Value: little-endian integer. Default: 0. +// - **ContainerAliasFee** \ +// Fee paid for named container's creation by the container owner. +// Value: little-endian integer. Default: 0. +// - **ContainerFee** \ +// Fee paid for container creation by the container owner. +// Value: little-endian integer. 
Default: 0. +// - **EigenTrustAlpha** \ +// Alpha parameter of EigenTrust algorithm used in the Reputation system. +// Value: decimal floating-point number in UTF-8 string representation. +// Default: 0. +// - **EigenTrustIterations** \ +// Number of EigenTrust algorithm iterations to pass in the Reputation system. +// Value: little-endian integer. Default: 0. +// - **EpochDuration** \ +// NeoFS epoch duration measured in Sidechain blocks. +// Value: little-endian integer. Default: 0. +// - **HomomorphicHashingDisabled** \ +// Flag of disabling the homomorphic hashing of objects' payload. +// Value: true if any byte != 0. Default: false. +// - **InnerRingCandidateFee** \ +// Fee for entrance to the Inner Ring paid by the candidate. +// Value: little-endian integer. Default: 0. +// - **MaintenanceModeAllowed** \ +// Flag allowing setting the MAINTENANCE state to storage nodes. +// Value: true if any byte != 0. Default: false. +// - **MaxObjectSize** \ +// Maximum size of physically stored NeoFS object measured in bytes. +// Value: little-endian integer. Default: 0. +// - **WithdrawFee** \ +// Fee paid for withdrawal of funds paid by the account owner. +// Value: little-endian integer. Default: 0. +type NetworkConfig_Parameter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Parameter key. 
UTF-8 encoded string + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // Parameter value + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NetworkConfig_Parameter) Reset() { + *x = NetworkConfig_Parameter{} + if protoimpl.UnsafeEnabled { + mi := &file_netmap_grpc_types_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NetworkConfig_Parameter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NetworkConfig_Parameter) ProtoMessage() {} + +func (x *NetworkConfig_Parameter) ProtoReflect() protoreflect.Message { + mi := &file_netmap_grpc_types_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NetworkConfig_Parameter.ProtoReflect.Descriptor instead. 
+func (*NetworkConfig_Parameter) Descriptor() ([]byte, []int) { + return file_netmap_grpc_types_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *NetworkConfig_Parameter) GetKey() []byte { + if x != nil { + return x.Key + } + return nil +} + +func (x *NetworkConfig_Parameter) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +var File_netmap_grpc_types_proto protoreflect.FileDescriptor + +var file_netmap_grpc_types_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x6e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x6e, 0x65, 0x6f, 0x2e, 0x66, + 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x1a, 0x15, 0x72, 0x65, 0x66, + 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0xa5, 0x01, 0x0a, 0x06, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x2b, 0x0a, 0x02, 0x6f, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x1b, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6e, 0x65, 0x74, 0x6d, + 0x61, 0x70, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x02, 0x6f, 0x70, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x32, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, + 0x2e, 0x76, 0x32, 0x2e, 0x6e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x22, 0x9c, 0x01, 0x0a, 0x08, 0x53, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 
0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x12, 0x30, 0x0a, 0x06, 0x63, 0x6c, 0x61, 0x75, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6e, 0x65, + 0x74, 0x6d, 0x61, 0x70, 0x2e, 0x43, 0x6c, 0x61, 0x75, 0x73, 0x65, 0x52, 0x06, 0x63, 0x6c, 0x61, + 0x75, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x3b, 0x0a, 0x07, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x22, 0xa9, 0x02, 0x0a, 0x0f, 0x50, 0x6c, 0x61, 0x63, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6e, + 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x2e, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x73, 0x12, 0x36, 0x0a, 0x17, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x62, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x15, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x42, 0x61, 0x63, + 
0x6b, 0x75, 0x70, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x38, 0x0a, 0x09, 0x73, 0x65, 0x6c, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6e, + 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x2e, + 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x09, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x73, 0x12, 0x32, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x6e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x07, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x12, 0x39, 0x0a, 0x09, 0x73, 0x75, 0x62, 0x6e, 0x65, + 0x74, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, + 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x53, 0x75, 0x62, 0x6e, + 0x65, 0x74, 0x49, 0x44, 0x42, 0x02, 0x18, 0x01, 0x52, 0x08, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, + 0x49, 0x64, 0x22, 0xd8, 0x02, 0x0a, 0x08, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, + 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x1c, + 0x0a, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x0a, + 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6e, 0x65, 0x74, + 0x6d, 0x61, 0x70, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x12, 0x36, 0x0a, 0x05, 
0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x20, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6e, 0x65, + 0x74, 0x6d, 0x61, 0x70, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x4d, 0x0a, 0x09, 0x41, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x07, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x42, 0x0a, 0x05, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x4e, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x01, 0x12, + 0x0b, 0x0a, 0x07, 0x4f, 0x46, 0x46, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b, + 0x4d, 0x41, 0x49, 0x4e, 0x54, 0x45, 0x4e, 0x41, 0x4e, 0x43, 0x45, 0x10, 0x03, 0x22, 0x50, 0x0a, + 0x06, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x30, 0x0a, + 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6e, + 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x2e, + 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x22, + 0x8f, 0x01, 0x0a, 0x0d, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x49, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 
0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x6e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x1a, 0x33, 0x0a, 0x09, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0xbf, 0x01, 0x0a, 0x0b, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x70, 0x6f, + 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, + 0x74, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x67, 0x69, 0x63, 0x5f, + 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x6d, 0x61, + 0x67, 0x69, 0x63, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0c, 0x6d, 0x73, 0x5f, + 0x70, 0x65, 0x72, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0a, 0x6d, 0x73, 0x50, 0x65, 0x72, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x46, 0x0a, 0x0e, 0x6e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, + 0x6e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0d, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2a, 0x67, 0x0a, 0x09, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x19, 0x0a, 0x15, 0x4f, 0x50, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, + 0x53, 0x50, 
0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x45, + 0x51, 0x10, 0x01, 0x12, 0x06, 0x0a, 0x02, 0x4e, 0x45, 0x10, 0x02, 0x12, 0x06, 0x0a, 0x02, 0x47, + 0x54, 0x10, 0x03, 0x12, 0x06, 0x0a, 0x02, 0x47, 0x45, 0x10, 0x04, 0x12, 0x06, 0x0a, 0x02, 0x4c, + 0x54, 0x10, 0x05, 0x12, 0x06, 0x0a, 0x02, 0x4c, 0x45, 0x10, 0x06, 0x12, 0x06, 0x0a, 0x02, 0x4f, + 0x52, 0x10, 0x07, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x44, 0x10, 0x08, 0x2a, 0x38, 0x0a, 0x06, + 0x43, 0x6c, 0x61, 0x75, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x43, 0x4c, 0x41, 0x55, 0x53, 0x45, + 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, + 0x0a, 0x04, 0x53, 0x41, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x53, 0x54, + 0x49, 0x4e, 0x43, 0x54, 0x10, 0x02, 0x42, 0x56, 0x5a, 0x37, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6e, 0x73, 0x70, 0x63, 0x63, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x6e, + 0x65, 0x6f, 0x66, 0x73, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x6e, + 0x65, 0x74, 0x6d, 0x61, 0x70, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x3b, 0x6e, 0x65, 0x74, 0x6d, 0x61, + 0x70, 0xaa, 0x02, 0x1a, 0x4e, 0x65, 0x6f, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x41, 0x50, 0x49, 0x2e, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_netmap_grpc_types_proto_rawDescOnce sync.Once + file_netmap_grpc_types_proto_rawDescData = file_netmap_grpc_types_proto_rawDesc +) + +func file_netmap_grpc_types_proto_rawDescGZIP() []byte { + file_netmap_grpc_types_proto_rawDescOnce.Do(func() { + file_netmap_grpc_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_netmap_grpc_types_proto_rawDescData) + }) + return file_netmap_grpc_types_proto_rawDescData +} + +var file_netmap_grpc_types_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_netmap_grpc_types_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var 
file_netmap_grpc_types_proto_goTypes = []interface{}{ + (Operation)(0), // 0: neo.fs.v2.netmap.Operation + (Clause)(0), // 1: neo.fs.v2.netmap.Clause + (NodeInfo_State)(0), // 2: neo.fs.v2.netmap.NodeInfo.State + (*Filter)(nil), // 3: neo.fs.v2.netmap.Filter + (*Selector)(nil), // 4: neo.fs.v2.netmap.Selector + (*Replica)(nil), // 5: neo.fs.v2.netmap.Replica + (*PlacementPolicy)(nil), // 6: neo.fs.v2.netmap.PlacementPolicy + (*NodeInfo)(nil), // 7: neo.fs.v2.netmap.NodeInfo + (*Netmap)(nil), // 8: neo.fs.v2.netmap.Netmap + (*NetworkConfig)(nil), // 9: neo.fs.v2.netmap.NetworkConfig + (*NetworkInfo)(nil), // 10: neo.fs.v2.netmap.NetworkInfo + (*NodeInfo_Attribute)(nil), // 11: neo.fs.v2.netmap.NodeInfo.Attribute + (*NetworkConfig_Parameter)(nil), // 12: neo.fs.v2.netmap.NetworkConfig.Parameter + (*refs.SubnetID)(nil), // 13: neo.fs.v2.refs.SubnetID +} +var file_netmap_grpc_types_proto_depIdxs = []int32{ + 0, // 0: neo.fs.v2.netmap.Filter.op:type_name -> neo.fs.v2.netmap.Operation + 3, // 1: neo.fs.v2.netmap.Filter.filters:type_name -> neo.fs.v2.netmap.Filter + 1, // 2: neo.fs.v2.netmap.Selector.clause:type_name -> neo.fs.v2.netmap.Clause + 5, // 3: neo.fs.v2.netmap.PlacementPolicy.replicas:type_name -> neo.fs.v2.netmap.Replica + 4, // 4: neo.fs.v2.netmap.PlacementPolicy.selectors:type_name -> neo.fs.v2.netmap.Selector + 3, // 5: neo.fs.v2.netmap.PlacementPolicy.filters:type_name -> neo.fs.v2.netmap.Filter + 13, // 6: neo.fs.v2.netmap.PlacementPolicy.subnet_id:type_name -> neo.fs.v2.refs.SubnetID + 11, // 7: neo.fs.v2.netmap.NodeInfo.attributes:type_name -> neo.fs.v2.netmap.NodeInfo.Attribute + 2, // 8: neo.fs.v2.netmap.NodeInfo.state:type_name -> neo.fs.v2.netmap.NodeInfo.State + 7, // 9: neo.fs.v2.netmap.Netmap.nodes:type_name -> neo.fs.v2.netmap.NodeInfo + 12, // 10: neo.fs.v2.netmap.NetworkConfig.parameters:type_name -> neo.fs.v2.netmap.NetworkConfig.Parameter + 9, // 11: neo.fs.v2.netmap.NetworkInfo.network_config:type_name -> neo.fs.v2.netmap.NetworkConfig + 
12, // [12:12] is the sub-list for method output_type + 12, // [12:12] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name +} + +func init() { file_netmap_grpc_types_proto_init() } +func file_netmap_grpc_types_proto_init() { + if File_netmap_grpc_types_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_netmap_grpc_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Filter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_netmap_grpc_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Selector); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_netmap_grpc_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Replica); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_netmap_grpc_types_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PlacementPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_netmap_grpc_types_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_netmap_grpc_types_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Netmap); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + 
} + file_netmap_grpc_types_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NetworkConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_netmap_grpc_types_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NetworkInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_netmap_grpc_types_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeInfo_Attribute); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_netmap_grpc_types_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NetworkConfig_Parameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_netmap_grpc_types_proto_rawDesc, + NumEnums: 3, + NumMessages: 10, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_netmap_grpc_types_proto_goTypes, + DependencyIndexes: file_netmap_grpc_types_proto_depIdxs, + EnumInfos: file_netmap_grpc_types_proto_enumTypes, + MessageInfos: file_netmap_grpc_types_proto_msgTypes, + }.Build() + File_netmap_grpc_types_proto = out.File + file_netmap_grpc_types_proto_rawDesc = nil + file_netmap_grpc_types_proto_goTypes = nil + file_netmap_grpc_types_proto_depIdxs = nil +} diff --git a/api/object/encoding.go b/api/object/encoding.go new file mode 100644 index 000000000..cabfdab0b --- /dev/null +++ b/api/object/encoding.go @@ -0,0 +1,760 @@ +package object + +import ( + "fmt" + + "github.com/nspcc-dev/neofs-sdk-go/internal/proto" +) + 
+const ( + _ = iota + fieldSplitParent + fieldSplitPrevious + fieldSplitParentSignature + fieldSplitParentHeader + fieldSplitChildren + fieldSplitID + fieldSplitFirst +) + +func (x *Header_Split) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldSplitParent, x.Parent) + + proto.SizeNested(fieldSplitPrevious, x.Previous) + + proto.SizeNested(fieldSplitParentSignature, x.ParentSignature) + + proto.SizeNested(fieldSplitParentHeader, x.ParentHeader) + + proto.SizeBytes(fieldSplitID, x.SplitId) + + proto.SizeNested(fieldSplitFirst, x.First) + for i := range x.Children { + sz += proto.SizeNested(fieldSplitChildren, x.Children[i]) + } + } + return sz +} + +func (x *Header_Split) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalNested(b, fieldSplitParent, x.Parent) + off += proto.MarshalNested(b[off:], fieldSplitPrevious, x.Previous) + off += proto.MarshalNested(b[off:], fieldSplitParentSignature, x.ParentSignature) + off += proto.MarshalNested(b[off:], fieldSplitParentHeader, x.ParentHeader) + off += proto.MarshalBytes(b[off:], fieldSplitID, x.SplitId) + for i := range x.Children { + off += proto.MarshalNested(b[off:], fieldSplitChildren, x.Children[i]) + } + proto.MarshalNested(b[off:], fieldSplitFirst, x.First) + } +} + +const ( + _ = iota + fieldAttributeKey + fieldAttributeValue +) + +func (x *Header_Attribute) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeBytes(fieldAttributeKey, x.Key) + + proto.SizeBytes(fieldAttributeValue, x.Value) + } + return sz +} + +func (x *Header_Attribute) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalBytes(b, fieldAttributeKey, x.Key) + proto.MarshalBytes(b[off:], fieldAttributeValue, x.Value) + } +} + +const ( + _ = iota + fieldShortHeaderVersion + fieldShortHeaderCreationEpoch + fieldShortHeaderOwner + fieldShortHeaderType + fieldShortHeaderLen + fieldShortHeaderChecksum + fieldShortHeaderHomomorphic +) + +func (x *ShortHeader) MarshaledSize() int { + 
var sz int + if x != nil { + sz = proto.SizeNested(fieldShortHeaderVersion, x.Version) + + proto.SizeVarint(fieldShortHeaderCreationEpoch, x.CreationEpoch) + + proto.SizeNested(fieldShortHeaderOwner, x.OwnerId) + + proto.SizeVarint(fieldShortHeaderType, int32(x.ObjectType)) + + proto.SizeVarint(fieldShortHeaderLen, x.PayloadLength) + + proto.SizeNested(fieldShortHeaderChecksum, x.PayloadHash) + + proto.SizeNested(fieldShortHeaderHomomorphic, x.HomomorphicHash) + } + return sz +} + +func (x *ShortHeader) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalNested(b, fieldShortHeaderVersion, x.Version) + off += proto.MarshalVarint(b[off:], fieldShortHeaderCreationEpoch, x.CreationEpoch) + off += proto.MarshalNested(b[off:], fieldShortHeaderOwner, x.OwnerId) + off += proto.MarshalVarint(b[off:], fieldShortHeaderType, int32(x.ObjectType)) + off += proto.MarshalVarint(b[off:], fieldShortHeaderLen, x.PayloadLength) + off += proto.MarshalNested(b[off:], fieldShortHeaderChecksum, x.PayloadHash) + off += proto.MarshalNested(b[off:], fieldShortHeaderHomomorphic, x.HomomorphicHash) + } +} + +const ( + _ = iota + fieldHeaderVersion + fieldHeaderContainer + fieldHeaderOwner + fieldHeaderCreationEpoch + fieldHeaderLen + fieldHeaderChecksum + fieldHeaderType + fieldHeaderHomomorphic + fieldHeaderSession + fieldHeaderAttributes + fieldHeaderSplit +) + +func (x *Header) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldHeaderVersion, x.Version) + + proto.SizeNested(fieldHeaderContainer, x.ContainerId) + + proto.SizeNested(fieldHeaderOwner, x.OwnerId) + + proto.SizeVarint(fieldHeaderCreationEpoch, x.CreationEpoch) + + proto.SizeVarint(fieldHeaderLen, x.PayloadLength) + + proto.SizeNested(fieldHeaderChecksum, x.PayloadHash) + + proto.SizeVarint(fieldHeaderType, int32(x.ObjectType)) + + proto.SizeNested(fieldHeaderHomomorphic, x.HomomorphicHash) + + proto.SizeNested(fieldHeaderSession, x.SessionToken) + + proto.SizeNested(fieldHeaderSplit, 
x.Split) + for i := range x.Attributes { + sz += proto.SizeNested(fieldHeaderAttributes, x.Attributes[i]) + } + } + return sz +} + +func (x *Header) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalNested(b, fieldHeaderVersion, x.Version) + off += proto.MarshalNested(b[off:], fieldHeaderContainer, x.ContainerId) + off += proto.MarshalNested(b[off:], fieldHeaderOwner, x.OwnerId) + off += proto.MarshalVarint(b[off:], fieldHeaderCreationEpoch, x.CreationEpoch) + off += proto.MarshalVarint(b[off:], fieldHeaderLen, x.PayloadLength) + off += proto.MarshalNested(b[off:], fieldHeaderChecksum, x.PayloadHash) + off += proto.MarshalVarint(b[off:], fieldHeaderType, int32(x.ObjectType)) + off += proto.MarshalNested(b[off:], fieldHeaderHomomorphic, x.HomomorphicHash) + off += proto.MarshalNested(b[off:], fieldHeaderSession, x.SessionToken) + for i := range x.Attributes { + off += proto.MarshalNested(b[off:], fieldHeaderAttributes, x.Attributes[i]) + } + proto.MarshalNested(b[off:], fieldHeaderSplit, x.Split) + } +} + +const ( + _ = iota + fieldObjectID + fieldObjectSignature + fieldObjectHeader + fieldObjectPayload +) + +func (x *Object) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldObjectID, x.ObjectId) + + proto.SizeNested(fieldObjectSignature, x.Signature) + + proto.SizeNested(fieldObjectHeader, x.Header) + + proto.SizeBytes(fieldObjectPayload, x.Payload) + } + return sz +} + +func (x *Object) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalNested(b, fieldObjectID, x.ObjectId) + off += proto.MarshalNested(b[off:], fieldObjectSignature, x.Signature) + off += proto.MarshalNested(b[off:], fieldObjectHeader, x.Header) + proto.MarshalBytes(b[off:], fieldObjectPayload, x.Payload) + } +} + +const ( + _ = iota + fieldSplitInfoID + fieldSplitInfoLast + fieldSplitInfoLink + fieldSplitInfoFirst +) + +func (x *SplitInfo) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeBytes(fieldSplitInfoID, x.SplitId) + 
+ proto.SizeNested(fieldSplitInfoLast, x.LastPart) + + proto.SizeNested(fieldSplitInfoLink, x.Link) + + proto.SizeNested(fieldSplitInfoFirst, x.FirstPart) + } + return sz +} + +func (x *SplitInfo) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalBytes(b, fieldSplitInfoID, x.SplitId) + off += proto.MarshalNested(b[off:], fieldSplitInfoLast, x.LastPart) + off += proto.MarshalNested(b[off:], fieldSplitInfoLink, x.Link) + proto.MarshalNested(b[off:], fieldSplitInfoFirst, x.FirstPart) + } +} + +const ( + _ = iota + fieldGetReqAddress + fieldGetReqRaw +) + +func (x *GetRequest_Body) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldGetReqAddress, x.Address) + + proto.SizeBool(fieldGetReqRaw, x.Raw) + } + return sz +} + +func (x *GetRequest_Body) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalNested(b, fieldGetReqAddress, x.Address) + proto.MarshalBool(b[off:], fieldGetReqRaw, x.Raw) + } +} + +const ( + _ = iota + fieldGetRespInitID + fieldGetRespInitSignature + fieldGetRespInitHeader +) + +func (x *GetResponse_Body_Init) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldGetRespInitID, x.ObjectId) + + proto.SizeNested(fieldGetRespInitSignature, x.Signature) + + proto.SizeNested(fieldGetRespInitHeader, x.Header) + } + return sz +} + +func (x *GetResponse_Body_Init) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalNested(b, fieldGetRespInitID, x.ObjectId) + off += proto.MarshalNested(b[off:], fieldGetRespInitSignature, x.Signature) + proto.MarshalNested(b[off:], fieldGetRespInitHeader, x.Header) + } +} + +const ( + _ = iota + fieldGetRespInit + fieldGetRespChunk + fieldGetRespSplitInfo +) + +func (x *GetResponse_Body) MarshaledSize() int { + var sz int + if x != nil { + switch p := x.ObjectPart.(type) { + default: + panic(fmt.Sprintf("unexpected object part %T", x.ObjectPart)) + case nil: + case *GetResponse_Body_Init_: + if p != nil { + sz = 
proto.SizeNested(fieldGetRespInit, p.Init) + } + case *GetResponse_Body_Chunk: + if p != nil { + sz = proto.SizeBytes(fieldGetRespChunk, p.Chunk) + } + case *GetResponse_Body_SplitInfo: + if p != nil { + sz = proto.SizeNested(fieldGetRespSplitInfo, p.SplitInfo) + } + } + } + return sz +} + +func (x *GetResponse_Body) MarshalStable(b []byte) { + if x != nil { + switch p := x.ObjectPart.(type) { + default: + panic(fmt.Sprintf("unexpected object part %T", x.ObjectPart)) + case nil: + case *GetResponse_Body_Init_: + if p != nil { + proto.MarshalNested(b, fieldGetRespInit, p.Init) + } + case *GetResponse_Body_Chunk: + if p != nil { + proto.MarshalBytes(b, fieldGetRespChunk, p.Chunk) + } + case *GetResponse_Body_SplitInfo: + if p != nil { + proto.MarshalNested(b, fieldGetRespSplitInfo, p.SplitInfo) + } + } + } +} + +const ( + _ = iota + fieldHeadReqAddress + fieldHeadReqMain + fieldHeadReqRaw +) + +func (x *HeadRequest_Body) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldHeadReqAddress, x.Address) + + proto.SizeBool(fieldHeadReqMain, x.MainOnly) + + proto.SizeBool(fieldHeadReqRaw, x.Raw) + } + return sz +} + +func (x *HeadRequest_Body) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalNested(b, fieldHeadReqAddress, x.Address) + off += proto.MarshalBool(b[off:], fieldHeadReqMain, x.MainOnly) + proto.MarshalBool(b[off:], fieldHeadReqRaw, x.Raw) + } +} + +const ( + _ = iota + fieldHeaderSignatureHdr + fieldHeaderSignatureSig +) + +func (x *HeaderWithSignature) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldHeaderSignatureHdr, x.Header) + + proto.SizeNested(fieldHeaderSignatureSig, x.Signature) + } + return sz +} + +func (x *HeaderWithSignature) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalNested(b, fieldHeaderSignatureHdr, x.Header) + proto.MarshalNested(b[off:], fieldHeaderSignatureSig, x.Signature) + } +} + +const ( + _ = iota + fieldHeadRespHeader + fieldHeadRespShort + 
fieldHeadRespSplitInfo +) + +func (x *HeadResponse_Body) MarshaledSize() int { + var sz int + if x != nil { + switch h := x.Head.(type) { + default: + panic(fmt.Sprintf("unexpected head part %T", x.Head)) + case nil: + case *HeadResponse_Body_Header: + if h != nil { + sz = proto.SizeNested(fieldHeadRespHeader, h.Header) + } + case *HeadResponse_Body_ShortHeader: + if h != nil { + sz = proto.SizeNested(fieldHeadRespShort, h.ShortHeader) + } + case *HeadResponse_Body_SplitInfo: + if h != nil { + sz = proto.SizeNested(fieldHeadRespSplitInfo, h.SplitInfo) + } + } + } + return sz +} + +func (x *HeadResponse_Body) MarshalStable(b []byte) { + if x != nil { + switch h := x.Head.(type) { + default: + panic(fmt.Sprintf("unexpected head part %T", x.Head)) + case nil: + case *HeadResponse_Body_Header: + if h != nil { + proto.MarshalNested(b, fieldHeadRespHeader, h.Header) + } + case *HeadResponse_Body_ShortHeader: + if h != nil { + proto.MarshalNested(b, fieldHeadRespShort, h.ShortHeader) + } + case *HeadResponse_Body_SplitInfo: + if h != nil { + proto.MarshalNested(b, fieldHeadRespSplitInfo, h.SplitInfo) + } + } + } +} + +const ( + _ = iota + fieldRangeOff + fieldRangeLen +) + +func (x *Range) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeVarint(fieldRangeOff, x.Offset) + + proto.SizeVarint(fieldRangeLen, x.Length) + } + return sz +} + +func (x *Range) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalVarint(b, fieldRangeOff, x.Offset) + proto.MarshalVarint(b[off:], fieldRangeLen, x.Length) + } +} + +const ( + _ = iota + fieldRangeReqAddress + fieldRangeReqRange + fieldRangeReqRaw +) + +func (x *GetRangeRequest_Body) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldRangeReqAddress, x.Address) + + proto.SizeNested(fieldRangeReqRange, x.Range) + + proto.SizeBool(fieldRangeReqRaw, x.Raw) + } + return sz +} + +func (x *GetRangeRequest_Body) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalNested(b, 
fieldRangeReqAddress, x.Address) + off += proto.MarshalNested(b[off:], fieldRangeReqRange, x.Range) + proto.MarshalBool(b[off:], fieldRangeReqRaw, x.Raw) + } +} + +const ( + _ = iota + fieldPutReqInitID + fieldPutReqInitSignature + fieldPutReqInitHeader + fieldPutReqInitCopies +) + +const ( + _ = iota + fieldRangeRespChunk + fieldRangeRespSplitInfo +) + +func (x *GetRangeResponse_Body) MarshaledSize() int { + var sz int + if x != nil { + switch p := x.RangePart.(type) { + default: + panic(fmt.Sprintf("unexpected range part %T", x.RangePart)) + case nil: + case *GetRangeResponse_Body_Chunk: + if p != nil { + sz = proto.SizeBytes(fieldRangeRespChunk, p.Chunk) + } + case *GetRangeResponse_Body_SplitInfo: + if p != nil { + sz = proto.SizeNested(fieldRangeRespSplitInfo, p.SplitInfo) + } + } + } + return sz +} + +func (x *GetRangeResponse_Body) MarshalStable(b []byte) { + if x != nil { + switch p := x.RangePart.(type) { + default: + panic(fmt.Sprintf("unexpected range part %T", x.RangePart)) + case nil: + case *GetRangeResponse_Body_Chunk: + if p != nil { + proto.MarshalBytes(b, fieldRangeRespChunk, p.Chunk) + } + case *GetRangeResponse_Body_SplitInfo: + if p != nil { + proto.MarshalNested(b, fieldRangeRespSplitInfo, p.SplitInfo) + } + } + } +} + +const ( + _ = iota + fieldRangeHashReqAddress + fieldRangeHashReqRanges + fieldRangeHashReqSalt + fieldRangeHashReqType +) + +func (x *GetRangeHashRequest_Body) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldRangeHashReqAddress, x.Address) + + proto.SizeBytes(fieldRangeHashReqSalt, x.Salt) + + proto.SizeVarint(fieldRangeHashReqType, int32(x.Type)) + for i := range x.Ranges { + sz += proto.SizeNested(fieldRangeHashReqRanges, x.Ranges[i]) + } + } + return sz +} + +func (x *GetRangeHashRequest_Body) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalNested(b, fieldRangeHashReqAddress, x.Address) + for i := range x.Ranges { + off += proto.MarshalNested(b[off:], fieldRangeHashReqRanges, 
x.Ranges[i]) + } + off += proto.MarshalBytes(b[off:], fieldRangeHashReqSalt, x.Salt) + proto.MarshalVarint(b[off:], fieldRangeHashReqType, int32(x.Type)) + } +} + +const ( + _ = iota + fieldRangeHashRespType + fieldRangeHashRespHashes +) + +func (x *GetRangeHashResponse_Body) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeVarint(fieldRangeHashRespType, int32(x.Type)) + + proto.SizeRepeatedBytes(fieldRangeHashRespHashes, x.HashList) + } + return sz +} + +func (x *GetRangeHashResponse_Body) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalVarint(b, fieldRangeHashRespType, int32(x.Type)) + proto.MarshalRepeatedBytes(b[off:], fieldRangeHashRespHashes, x.HashList) + } +} + +func (x *PutRequest_Body_Init) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldPutReqInitID, x.ObjectId) + + proto.SizeNested(fieldPutReqInitSignature, x.Signature) + + proto.SizeNested(fieldPutReqInitHeader, x.Header) + + proto.SizeVarint(fieldPutReqInitCopies, x.CopiesNumber) + } + return sz +} + +func (x *PutRequest_Body_Init) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalNested(b, fieldPutReqInitID, x.ObjectId) + off += proto.MarshalNested(b[off:], fieldPutReqInitSignature, x.Signature) + off += proto.MarshalNested(b[off:], fieldPutReqInitHeader, x.Header) + proto.MarshalVarint(b[off:], fieldPutReqInitCopies, x.CopiesNumber) + } +} + +const ( + _ = iota + fieldPutReqInit + fieldPutReqChunk +) + +func (x *PutRequest_Body) MarshaledSize() int { + var sz int + if x != nil { + switch p := x.ObjectPart.(type) { + default: + panic(fmt.Sprintf("unexpected object part %T", x.ObjectPart)) + case nil: + case *PutRequest_Body_Init_: + sz = proto.SizeNested(fieldPutReqInit, p.Init) + case *PutRequest_Body_Chunk: + sz = proto.SizeBytes(fieldPutReqChunk, p.Chunk) + } + } + return sz +} + +func (x *PutRequest_Body) MarshalStable(b []byte) { + if x != nil { + switch p := x.ObjectPart.(type) { + default: + 
panic(fmt.Sprintf("unexpected object part %T", x.ObjectPart)) + case nil: + case *PutRequest_Body_Init_: + proto.MarshalNested(b, fieldPutReqInit, p.Init) + case *PutRequest_Body_Chunk: + proto.MarshalBytes(b, fieldPutReqChunk, p.Chunk) + } + } +} + +const ( + _ = iota + fieldPutRespID +) + +func (x *PutResponse_Body) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldPutRespID, x.ObjectId) + } + return sz +} + +func (x *PutResponse_Body) MarshalStable(b []byte) { + if x != nil { + proto.MarshalNested(b, fieldPutRespID, x.ObjectId) + } +} + +const ( + _ = iota + fieldDeleteReqAddress +) + +func (x *DeleteRequest_Body) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldDeleteReqAddress, x.Address) + } + return sz +} + +func (x *DeleteRequest_Body) MarshalStable(b []byte) { + if x != nil { + proto.MarshalNested(b, fieldDeleteReqAddress, x.Address) + } +} + +const ( + _ = iota + fieldDeleteRespTombstone +) + +func (x *DeleteResponse_Body) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldDeleteRespTombstone, x.Tombstone) + } + return sz +} + +func (x *DeleteResponse_Body) MarshalStable(b []byte) { + if x != nil { + proto.MarshalNested(b, fieldDeleteRespTombstone, x.Tombstone) + } +} + +const ( + _ = iota + fieldSearchFilterMatcher + fieldSearchFilterKey + fieldSearchFilterValue +) + +func (x *SearchRequest_Body_Filter) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeVarint(fieldSearchFilterMatcher, int32(x.MatchType)) + sz += proto.SizeBytes(fieldSearchFilterKey, x.Key) + sz += proto.SizeBytes(fieldSearchFilterValue, x.Value) + } + return sz +} + +func (x *SearchRequest_Body_Filter) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalVarint(b, fieldSearchFilterMatcher, int32(x.MatchType)) + off += proto.MarshalBytes(b[off:], fieldSearchFilterKey, x.Key) + proto.MarshalBytes(b[off:], fieldSearchFilterValue, x.Value) + } +} + +const ( + _ = iota + 
fieldSearchReqContainer + fieldSearchReqVersion + fieldSearchReqFilters +) + +func (x *SearchRequest_Body) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldSearchReqContainer, x.ContainerId) + sz += proto.SizeVarint(fieldSearchReqVersion, x.Version) + for i := range x.Filters { + sz += proto.SizeNested(fieldSearchReqFilters, x.Filters[i]) + } + } + return sz +} + +func (x *SearchRequest_Body) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalNested(b, fieldSearchReqContainer, x.ContainerId) + off += proto.MarshalVarint(b[off:], fieldSearchReqVersion, x.Version) + for i := range x.Filters { + off += proto.MarshalNested(b[off:], fieldSearchReqFilters, x.Filters[i]) + } + } +} + +const ( + _ = iota + fieldSearchRespIDList +) + +func (x *SearchResponse_Body) MarshaledSize() int { + var sz int + if x != nil { + for i := range x.IdList { + sz += proto.SizeNested(fieldSearchRespIDList, x.IdList[i]) + } + } + return sz +} + +func (x *SearchResponse_Body) MarshalStable(b []byte) { + if x != nil { + var off int + for i := range x.IdList { + off += proto.MarshalNested(b[off:], fieldSearchRespIDList, x.IdList[i]) + } + } +} diff --git a/api/object/encoding_test.go b/api/object/encoding_test.go new file mode 100644 index 000000000..76ff49fed --- /dev/null +++ b/api/object/encoding_test.go @@ -0,0 +1,613 @@ +package object_test + +import ( + "testing" + + "github.com/nspcc-dev/neofs-sdk-go/api/object" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/session" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" +) + +func TestObject(t *testing.T) { + v := &object.Object{ + ObjectId: &refs.ObjectID{Value: []byte("any_object_ID")}, + Signature: &refs.Signature{ + Key: []byte("any_public_key"), + Sign: []byte("any_signature"), + Scheme: 1, + }, + Header: &object.Header{ + Version: &refs.Version{Major: 2, Minor: 3}, + ContainerId: &refs.ContainerID{Value: 
[]byte("any_container")}, + OwnerId: &refs.OwnerID{Value: []byte("any_owner")}, + CreationEpoch: 4, + PayloadLength: 5, + PayloadHash: &refs.Checksum{Type: 6, Sum: []byte("any_checksum")}, + ObjectType: 7, + HomomorphicHash: &refs.Checksum{Type: 8, Sum: []byte("any_homomorphic_checksum")}, + SessionToken: &session.SessionToken{ + Body: &session.SessionToken_Body{ + Id: []byte("any_ID"), + OwnerId: &refs.OwnerID{Value: []byte("any_owner")}, + Lifetime: &session.SessionToken_Body_TokenLifetime{Exp: 9, Nbf: 10, Iat: 11}, + SessionKey: []byte("any_key"), + }, + Signature: &refs.Signature{Key: []byte("any_key"), Sign: []byte("any_signature"), Scheme: 12}, + }, + Attributes: []*object.Header_Attribute{ + {Key: "attr_key1", Value: "attr_val1"}, + {Key: "attr_key2", Value: "attr_val2"}, + }, + Split: &object.Header_Split{ + Parent: &refs.ObjectID{Value: []byte("any_parent_ID")}, + Previous: &refs.ObjectID{Value: []byte("any_previous")}, + ParentSignature: &refs.Signature{ + Key: []byte("any_parent_public_key"), + Sign: []byte("any_parent_signature"), + Scheme: 13, + }, + ParentHeader: &object.Header{ + Version: &refs.Version{Major: 100, Minor: 101}, + ContainerId: &refs.ContainerID{Value: []byte("any_parent_container")}, + OwnerId: &refs.OwnerID{Value: []byte("any_parent_owner")}, + CreationEpoch: 102, + PayloadLength: 103, + PayloadHash: &refs.Checksum{Type: 104, Sum: []byte("any_parent_checksum")}, + ObjectType: 105, + HomomorphicHash: &refs.Checksum{Type: 106, Sum: []byte("any_parent_homomorphic_checksum")}, + Attributes: []*object.Header_Attribute{ + {Key: "parent_attr_key1", Value: "parent_attr_val2"}, + {Key: "parent_attr_key2", Value: "parent_attr_val2"}, + }, + }, + Children: []*refs.ObjectID{{Value: []byte("any_child1")}, {Value: []byte("any_child2")}}, + SplitId: []byte("any_split_ID"), + First: &refs.ObjectID{Value: []byte("any_first_ID")}, + }, + }, + Payload: []byte("any_payload"), + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + 
+ var res object.Object + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.ObjectId, res.ObjectId) + require.Equal(t, v.Signature, res.Signature) + require.Equal(t, v.Header, res.Header) + require.Equal(t, v.Payload, res.Payload) +} + +func TestGetRequest_Body(t *testing.T) { + v := &object.GetRequest_Body{ + Address: &refs.Address{ + ContainerId: &refs.ContainerID{Value: []byte("any_container")}, + ObjectId: &refs.ObjectID{Value: []byte("any_object")}, + }, + Raw: true, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res object.GetRequest_Body + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.Address, res.Address) + require.Equal(t, v.Raw, res.Raw) +} + +func TestGetResponse_Body(t *testing.T) { + var v object.GetResponse_Body + + testWithPart := func(setPart func(*object.GetResponse_Body)) { + setPart(&v) + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res object.GetResponse_Body + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.ObjectPart, res.ObjectPart) + } + + testWithPart(func(body *object.GetResponse_Body) { body.ObjectPart = nil }) + testWithPart(func(body *object.GetResponse_Body) { + body.ObjectPart = &object.GetResponse_Body_Chunk{Chunk: []byte("any_chunk")} + }) + testWithPart(func(body *object.GetResponse_Body) { + body.ObjectPart = &object.GetResponse_Body_SplitInfo{ + SplitInfo: &object.SplitInfo{ + SplitId: []byte("any_split_ID"), + LastPart: &refs.ObjectID{Value: []byte("any_last")}, + Link: &refs.ObjectID{Value: []byte("any_link")}, + FirstPart: &refs.ObjectID{Value: []byte("any_first")}, + }, + } + }) + testWithPart(func(body *object.GetResponse_Body) { + body.ObjectPart = &object.GetResponse_Body_Init_{ + Init: 
&object.GetResponse_Body_Init{ + ObjectId: &refs.ObjectID{Value: []byte("any_object_ID")}, + Signature: &refs.Signature{ + Key: []byte("any_public_key"), + Sign: []byte("any_signature"), + Scheme: 1, + }, + Header: &object.Header{ + Version: &refs.Version{Major: 2, Minor: 3}, + ContainerId: &refs.ContainerID{Value: []byte("any_container")}, + OwnerId: &refs.OwnerID{Value: []byte("any_owner")}, + CreationEpoch: 4, + PayloadLength: 5, + PayloadHash: &refs.Checksum{Type: 6, Sum: []byte("any_checksum")}, + ObjectType: 7, + HomomorphicHash: &refs.Checksum{Type: 8, Sum: []byte("any_homomorphic_checksum")}, + SessionToken: &session.SessionToken{ + Body: &session.SessionToken_Body{ + Id: []byte("any_ID"), + OwnerId: &refs.OwnerID{Value: []byte("any_owner")}, + Lifetime: &session.SessionToken_Body_TokenLifetime{Exp: 9, Nbf: 10, Iat: 11}, + SessionKey: []byte("any_key"), + }, + Signature: &refs.Signature{Key: []byte("any_key"), Sign: []byte("any_signature"), Scheme: 12}, + }, + Attributes: []*object.Header_Attribute{ + {Key: "attr_key1", Value: "attr_val1"}, + {Key: "attr_key2", Value: "attr_val2"}, + }, + Split: &object.Header_Split{ + Parent: &refs.ObjectID{Value: []byte("any_parent_ID")}, + Previous: &refs.ObjectID{Value: []byte("any_previous")}, + ParentSignature: &refs.Signature{ + Key: []byte("any_parent_public_key"), + Sign: []byte("any_parent_signature"), + Scheme: 13, + }, + ParentHeader: &object.Header{ + Version: &refs.Version{Major: 100, Minor: 101}, + ContainerId: &refs.ContainerID{Value: []byte("any_parent_container")}, + OwnerId: &refs.OwnerID{Value: []byte("any_parent_owner")}, + CreationEpoch: 102, + PayloadLength: 103, + PayloadHash: &refs.Checksum{Type: 104, Sum: []byte("any_parent_checksum")}, + ObjectType: 105, + HomomorphicHash: &refs.Checksum{Type: 106, Sum: []byte("any_parent_homomorphic_checksum")}, + Attributes: []*object.Header_Attribute{ + {Key: "parent_attr_key1", Value: "parent_attr_val2"}, + {Key: "parent_attr_key2", Value: "parent_attr_val2"}, 
+ }, + }, + Children: []*refs.ObjectID{{Value: []byte("any_child1")}, {Value: []byte("any_child2")}}, + SplitId: []byte("any_split_ID"), + First: &refs.ObjectID{Value: []byte("any_first_ID")}, + }, + }, + }, + } + }) +} + +func TestHeadRequest_Body(t *testing.T) { + v := &object.HeadRequest_Body{ + Address: &refs.Address{ + ContainerId: &refs.ContainerID{Value: []byte("any_container")}, + ObjectId: &refs.ObjectID{Value: []byte("any_object")}, + }, + MainOnly: true, + Raw: true, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res object.HeadRequest_Body + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.Address, res.Address) + require.Equal(t, v.MainOnly, res.MainOnly) + require.Equal(t, v.Raw, res.Raw) +} + +func TestHeadResponse_Body(t *testing.T) { + var v object.HeadResponse_Body + + testWithPart := func(setPart func(body *object.HeadResponse_Body)) { + setPart(&v) + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res object.HeadResponse_Body + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.Head, res.Head) + } + + testWithPart(func(body *object.HeadResponse_Body) { body.Head = nil }) + + testWithPart(func(body *object.HeadResponse_Body) { + body.Head = &object.HeadResponse_Body_ShortHeader{ + ShortHeader: &object.ShortHeader{ + Version: &refs.Version{Major: 2, Minor: 3}, + OwnerId: &refs.OwnerID{Value: []byte("any_owner")}, + CreationEpoch: 4, + PayloadLength: 5, + PayloadHash: &refs.Checksum{Type: 6, Sum: []byte("any_checksum")}, + ObjectType: 7, + HomomorphicHash: &refs.Checksum{Type: 8, Sum: []byte("any_homomorphic_checksum")}, + }, + } + }) + + testWithPart(func(body *object.HeadResponse_Body) { + body.Head = &object.HeadResponse_Body_SplitInfo{ + SplitInfo: &object.SplitInfo{ + SplitId: []byte("any_split_ID"), + LastPart: 
&refs.ObjectID{Value: []byte("any_last")}, + Link: &refs.ObjectID{Value: []byte("any_link")}, + FirstPart: &refs.ObjectID{Value: []byte("any_first")}, + }, + } + }) + + testWithPart(func(body *object.HeadResponse_Body) { + body.Head = &object.HeadResponse_Body_Header{ + Header: &object.HeaderWithSignature{ + Header: &object.Header{ + Version: &refs.Version{Major: 2, Minor: 3}, + ContainerId: &refs.ContainerID{Value: []byte("any_container")}, + OwnerId: &refs.OwnerID{Value: []byte("any_owner")}, + CreationEpoch: 4, + PayloadLength: 5, + PayloadHash: &refs.Checksum{Type: 6, Sum: []byte("any_checksum")}, + ObjectType: 7, + HomomorphicHash: &refs.Checksum{Type: 8, Sum: []byte("any_homomorphic_checksum")}, + SessionToken: &session.SessionToken{ + Body: &session.SessionToken_Body{ + Id: []byte("any_ID"), + OwnerId: &refs.OwnerID{Value: []byte("any_owner")}, + Lifetime: &session.SessionToken_Body_TokenLifetime{Exp: 9, Nbf: 10, Iat: 11}, + SessionKey: []byte("any_key"), + }, + Signature: &refs.Signature{Key: []byte("any_key"), Sign: []byte("any_signature"), Scheme: 12}, + }, + Attributes: []*object.Header_Attribute{ + {Key: "attr_key1", Value: "attr_val1"}, + {Key: "attr_key2", Value: "attr_val2"}, + }, + Split: &object.Header_Split{ + Parent: &refs.ObjectID{Value: []byte("any_parent_ID")}, + Previous: &refs.ObjectID{Value: []byte("any_previous")}, + ParentSignature: &refs.Signature{ + Key: []byte("any_parent_public_key"), + Sign: []byte("any_parent_signature"), + Scheme: 13, + }, + ParentHeader: &object.Header{ + Version: &refs.Version{Major: 100, Minor: 101}, + ContainerId: &refs.ContainerID{Value: []byte("any_parent_container")}, + OwnerId: &refs.OwnerID{Value: []byte("any_parent_owner")}, + CreationEpoch: 102, + PayloadLength: 103, + PayloadHash: &refs.Checksum{Type: 104, Sum: []byte("any_parent_checksum")}, + ObjectType: 105, + HomomorphicHash: &refs.Checksum{Type: 106, Sum: []byte("any_parent_homomorphic_checksum")}, + Attributes: []*object.Header_Attribute{ + {Key: 
"parent_attr_key1", Value: "parent_attr_val2"}, + {Key: "parent_attr_key2", Value: "parent_attr_val2"}, + }, + }, + Children: []*refs.ObjectID{{Value: []byte("any_child1")}, {Value: []byte("any_child2")}}, + SplitId: []byte("any_split_ID"), + First: &refs.ObjectID{Value: []byte("any_first_ID")}, + }, + }, + Signature: &refs.Signature{ + Key: []byte("any_public_key"), + Sign: []byte("any_signature"), + Scheme: 987, + }, + }, + } + }) +} + +func TestGetRangeRequest_Body(t *testing.T) { + v := &object.GetRangeRequest_Body{ + Address: &refs.Address{ + ContainerId: &refs.ContainerID{Value: []byte("any_container")}, + ObjectId: &refs.ObjectID{Value: []byte("any_object")}, + }, + Range: &object.Range{Offset: 1, Length: 2}, + Raw: true, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res object.GetRangeRequest_Body + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.Address, res.Address) + require.Equal(t, v.Range, res.Range) + require.Equal(t, v.Raw, res.Raw) +} + +func TestGetRangeResponse_Body(t *testing.T) { + var v object.GetRangeResponse_Body + + testWithPart := func(setPart func(body *object.GetRangeResponse_Body)) { + setPart(&v) + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res object.GetRangeResponse_Body + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.RangePart, res.RangePart) + } + + testWithPart(func(body *object.GetRangeResponse_Body) { body.RangePart = nil }) + + testWithPart(func(body *object.GetRangeResponse_Body) { + body.RangePart = &object.GetRangeResponse_Body_Chunk{Chunk: []byte("any_chunk")} + }) + + testWithPart(func(body *object.GetRangeResponse_Body) { + body.RangePart = &object.GetRangeResponse_Body_SplitInfo{ + SplitInfo: &object.SplitInfo{ + SplitId: []byte("any_split_ID"), + LastPart: &refs.ObjectID{Value: 
[]byte("any_last")}, + Link: &refs.ObjectID{Value: []byte("any_link")}, + FirstPart: &refs.ObjectID{Value: []byte("any_first")}, + }, + } + }) +} + +func TestGetRangeHashRequest_Body(t *testing.T) { + v := &object.GetRangeHashRequest_Body{ + Address: &refs.Address{ + ContainerId: &refs.ContainerID{Value: []byte("any_container")}, + ObjectId: &refs.ObjectID{Value: []byte("any_object")}, + }, + Ranges: []*object.Range{ + {Offset: 1, Length: 2}, + {Offset: 3, Length: 4}, + }, + Salt: []byte("any_salt"), + Type: 5, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res object.GetRangeHashRequest_Body + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.Address, res.Address) + require.Equal(t, v.Ranges, res.Ranges) + require.Equal(t, v.Salt, res.Salt) + require.Equal(t, v.Type, res.Type) +} + +func TestGetRangeHashResponse_Body(t *testing.T) { + v := &object.GetRangeHashResponse_Body{ + Type: 1, + HashList: [][]byte{[]byte("any_hash1"), []byte("any_hash2")}, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res object.GetRangeHashResponse_Body + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.Type, res.Type) + require.Equal(t, v.HashList, res.HashList) +} + +func TestDeleteRequest_Body(t *testing.T) { + v := &object.DeleteRequest_Body{ + Address: &refs.Address{ + ContainerId: &refs.ContainerID{Value: []byte("any_container")}, + ObjectId: &refs.ObjectID{Value: []byte("any_object")}, + }, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res object.DeleteRequest_Body + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.Address, res.Address) +} + +func TestDeleteResponse_Body(t *testing.T) { + v := &object.DeleteResponse_Body{ + 
Tombstone: &refs.Address{ + ContainerId: &refs.ContainerID{Value: []byte("any_container")}, + ObjectId: &refs.ObjectID{Value: []byte("any_object")}, + }, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res object.DeleteResponse_Body + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.Tombstone, res.Tombstone) +} + +func TestPutRequest_Body(t *testing.T) { + var v object.PutRequest_Body + + testWithPart := func(setPart func(body *object.PutRequest_Body)) { + setPart(&v) + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res object.PutRequest_Body + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.ObjectPart, res.ObjectPart) + } + + testWithPart(func(body *object.PutRequest_Body) { body.ObjectPart = nil }) + + testWithPart(func(body *object.PutRequest_Body) { + body.ObjectPart = &object.PutRequest_Body_Init_{ + Init: &object.PutRequest_Body_Init{ + ObjectId: &refs.ObjectID{Value: []byte("any_object_ID")}, + Signature: &refs.Signature{ + Key: []byte("any_public_key"), + Sign: []byte("any_signature"), + Scheme: 1, + }, + Header: &object.Header{ + Version: &refs.Version{Major: 2, Minor: 3}, + ContainerId: &refs.ContainerID{Value: []byte("any_container")}, + OwnerId: &refs.OwnerID{Value: []byte("any_owner")}, + CreationEpoch: 4, + PayloadLength: 5, + PayloadHash: &refs.Checksum{Type: 6, Sum: []byte("any_checksum")}, + ObjectType: 7, + HomomorphicHash: &refs.Checksum{Type: 8, Sum: []byte("any_homomorphic_checksum")}, + SessionToken: &session.SessionToken{ + Body: &session.SessionToken_Body{ + Id: []byte("any_ID"), + OwnerId: &refs.OwnerID{Value: []byte("any_owner")}, + Lifetime: &session.SessionToken_Body_TokenLifetime{Exp: 9, Nbf: 10, Iat: 11}, + SessionKey: []byte("any_key"), + }, + Signature: &refs.Signature{Key: []byte("any_key"), Sign: 
[]byte("any_signature"), Scheme: 12}, + }, + Attributes: []*object.Header_Attribute{ + {Key: "attr_key1", Value: "attr_val1"}, + {Key: "attr_key2", Value: "attr_val2"}, + }, + Split: &object.Header_Split{ + Parent: &refs.ObjectID{Value: []byte("any_parent_ID")}, + Previous: &refs.ObjectID{Value: []byte("any_previous")}, + ParentSignature: &refs.Signature{ + Key: []byte("any_parent_public_key"), + Sign: []byte("any_parent_signature"), + Scheme: 13, + }, + ParentHeader: &object.Header{ + Version: &refs.Version{Major: 100, Minor: 101}, + ContainerId: &refs.ContainerID{Value: []byte("any_parent_container")}, + OwnerId: &refs.OwnerID{Value: []byte("any_parent_owner")}, + CreationEpoch: 102, + PayloadLength: 103, + PayloadHash: &refs.Checksum{Type: 104, Sum: []byte("any_parent_checksum")}, + ObjectType: 105, + HomomorphicHash: &refs.Checksum{Type: 106, Sum: []byte("any_parent_homomorphic_checksum")}, + Attributes: []*object.Header_Attribute{ + {Key: "parent_attr_key1", Value: "parent_attr_val2"}, + {Key: "parent_attr_key2", Value: "parent_attr_val2"}, + }, + }, + Children: []*refs.ObjectID{{Value: []byte("any_child1")}, {Value: []byte("any_child2")}}, + SplitId: []byte("any_split_ID"), + First: &refs.ObjectID{Value: []byte("any_first_ID")}, + }, + }, + CopiesNumber: 1, + }, + } + }) + + testWithPart(func(body *object.PutRequest_Body) { + body.ObjectPart = &object.PutRequest_Body_Chunk{Chunk: []byte("any_chunk")} + }) +} + +func TestPutResponse_Body(t *testing.T) { + v := &object.PutResponse_Body{ + ObjectId: &refs.ObjectID{Value: []byte("any_object")}, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res object.PutResponse_Body + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.ObjectId, res.ObjectId) +} + +func TestSearchRequest_Body(t *testing.T) { + v := &object.SearchRequest_Body{ + ContainerId: &refs.ContainerID{Value: []byte("any_container")}, + 
Version: 123, + Filters: []*object.SearchRequest_Body_Filter{ + {MatchType: 456, Key: "k1", Value: "v1"}, + {MatchType: 789, Key: "k2", Value: "v2"}, + }, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res object.SearchRequest_Body + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.ContainerId, res.ContainerId) + require.Equal(t, v.Version, res.Version) + require.Equal(t, v.Filters, res.Filters) +} + +func TestSearchResponse_Body(t *testing.T) { + v := &object.SearchResponse_Body{ + IdList: []*refs.ObjectID{ + {Value: []byte("any_object1")}, + {Value: []byte("any_object2")}, + {Value: []byte("any_object3")}, + }, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res object.SearchResponse_Body + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.IdList, res.IdList) +} diff --git a/api/object/service.pb.go b/api/object/service.pb.go new file mode 100644 index 000000000..7e39f3448 --- /dev/null +++ b/api/object/service.pb.go @@ -0,0 +1,3525 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.32.0 +// protoc v4.25.1 +// source: object/grpc/service.proto + +package object + +import ( + reflect "reflect" + sync "sync" + + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/session" + "github.com/nspcc-dev/neofs-sdk-go/api/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// GET object request +type GetRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of get object request message. + Body *GetRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries request meta information. Header data is used only to regulate + // message transport and does not affect request execution. + MetaHeader *session.RequestMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries request verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. + VerifyHeader *session.RequestVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *GetRequest) Reset() { + *x = GetRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetRequest) ProtoMessage() {} + +func (x *GetRequest) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetRequest.ProtoReflect.Descriptor instead. 
+func (*GetRequest) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{0} +} + +func (x *GetRequest) GetBody() *GetRequest_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *GetRequest) GetMetaHeader() *session.RequestMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *GetRequest) GetVerifyHeader() *session.RequestVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// GET object response +type GetResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of get object response message. + Body *GetResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries response meta information. Header data is used only to regulate + // message transport and does not affect request execution. + MetaHeader *session.ResponseMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries response verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. 
+ VerifyHeader *session.ResponseVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *GetResponse) Reset() { + *x = GetResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetResponse) ProtoMessage() {} + +func (x *GetResponse) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetResponse.ProtoReflect.Descriptor instead. +func (*GetResponse) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{1} +} + +func (x *GetResponse) GetBody() *GetResponse_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *GetResponse) GetMetaHeader() *session.ResponseMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *GetResponse) GetVerifyHeader() *session.ResponseVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// PUT object request +type PutRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of put object request message. + Body *PutRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries request meta information. Header data is used only to regulate + // message transport and does not affect request execution. 
+ MetaHeader *session.RequestMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries request verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. + VerifyHeader *session.RequestVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *PutRequest) Reset() { + *x = PutRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PutRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PutRequest) ProtoMessage() {} + +func (x *PutRequest) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PutRequest.ProtoReflect.Descriptor instead. +func (*PutRequest) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{2} +} + +func (x *PutRequest) GetBody() *PutRequest_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *PutRequest) GetMetaHeader() *session.RequestMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *PutRequest) GetVerifyHeader() *session.RequestVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// PUT Object response +type PutResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of put object response message. 
+ Body *PutResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries response meta information. Header data is used only to regulate + // message transport and does not affect request execution. + MetaHeader *session.ResponseMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries response verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. + VerifyHeader *session.ResponseVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *PutResponse) Reset() { + *x = PutResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PutResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PutResponse) ProtoMessage() {} + +func (x *PutResponse) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PutResponse.ProtoReflect.Descriptor instead. 
+func (*PutResponse) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{3} +} + +func (x *PutResponse) GetBody() *PutResponse_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *PutResponse) GetMetaHeader() *session.ResponseMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *PutResponse) GetVerifyHeader() *session.ResponseVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// Object DELETE request +type DeleteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of delete object request message. + Body *DeleteRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries request meta information. Header data is used only to regulate + // message transport and does not affect request execution. + MetaHeader *session.RequestMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries request verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. 
+ VerifyHeader *session.RequestVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *DeleteRequest) Reset() { + *x = DeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteRequest) ProtoMessage() {} + +func (x *DeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteRequest.ProtoReflect.Descriptor instead. +func (*DeleteRequest) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{4} +} + +func (x *DeleteRequest) GetBody() *DeleteRequest_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *DeleteRequest) GetMetaHeader() *session.RequestMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *DeleteRequest) GetVerifyHeader() *session.RequestVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// DeleteResponse body is empty because we cannot guarantee permanent object +// removal in distributed system. +type DeleteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of delete object response message. + Body *DeleteResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries response meta information. Header data is used only to regulate + // message transport and does not affect request execution. 
+ MetaHeader *session.ResponseMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries response verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. + VerifyHeader *session.ResponseVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *DeleteResponse) Reset() { + *x = DeleteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteResponse) ProtoMessage() {} + +func (x *DeleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteResponse.ProtoReflect.Descriptor instead. +func (*DeleteResponse) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{5} +} + +func (x *DeleteResponse) GetBody() *DeleteResponse_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *DeleteResponse) GetMetaHeader() *session.ResponseMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *DeleteResponse) GetVerifyHeader() *session.ResponseVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// Object HEAD request +type HeadRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of head object request message. 
+ Body *HeadRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries request meta information. Header data is used only to regulate + // message transport and does not affect request execution. + MetaHeader *session.RequestMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries request verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. + VerifyHeader *session.RequestVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *HeadRequest) Reset() { + *x = HeadRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeadRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeadRequest) ProtoMessage() {} + +func (x *HeadRequest) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeadRequest.ProtoReflect.Descriptor instead. +func (*HeadRequest) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{6} +} + +func (x *HeadRequest) GetBody() *HeadRequest_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *HeadRequest) GetMetaHeader() *session.RequestMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *HeadRequest) GetVerifyHeader() *session.RequestVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// Tuple of a full object header and signature of an `ObjectID`. 
\ +// Signed `ObjectID` is present to verify full header's authenticity through the +// following steps: +// +// 1. Calculate `SHA-256` of the marshalled `Header` structure +// 2. Check if the resulting hash matches `ObjectID` +// 3. Check if `ObjectID` signature in `signature` field is correct +type HeaderWithSignature struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Full object header + Header *Header `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + // Signed `ObjectID` to verify full header's authenticity + Signature *refs.Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` +} + +func (x *HeaderWithSignature) Reset() { + *x = HeaderWithSignature{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeaderWithSignature) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeaderWithSignature) ProtoMessage() {} + +func (x *HeaderWithSignature) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeaderWithSignature.ProtoReflect.Descriptor instead. 
+func (*HeaderWithSignature) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{7} +} + +func (x *HeaderWithSignature) GetHeader() *Header { + if x != nil { + return x.Header + } + return nil +} + +func (x *HeaderWithSignature) GetSignature() *refs.Signature { + if x != nil { + return x.Signature + } + return nil +} + +// Object HEAD response +type HeadResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of head object response message. + Body *HeadResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries response meta information. Header data is used only to regulate + // message transport and does not affect request execution. + MetaHeader *session.ResponseMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries response verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. 
+ VerifyHeader *session.ResponseVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *HeadResponse) Reset() { + *x = HeadResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeadResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeadResponse) ProtoMessage() {} + +func (x *HeadResponse) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeadResponse.ProtoReflect.Descriptor instead. +func (*HeadResponse) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{8} +} + +func (x *HeadResponse) GetBody() *HeadResponse_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *HeadResponse) GetMetaHeader() *session.ResponseMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *HeadResponse) GetVerifyHeader() *session.ResponseVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// Object Search request +type SearchRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of search object request message. + Body *SearchRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries request meta information. Header data is used only to regulate + // message transport and does not affect request execution. 
+ MetaHeader *session.RequestMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries request verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. + VerifyHeader *session.RequestVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *SearchRequest) Reset() { + *x = SearchRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SearchRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SearchRequest) ProtoMessage() {} + +func (x *SearchRequest) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SearchRequest.ProtoReflect.Descriptor instead. +func (*SearchRequest) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{9} +} + +func (x *SearchRequest) GetBody() *SearchRequest_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *SearchRequest) GetMetaHeader() *session.RequestMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *SearchRequest) GetVerifyHeader() *session.RequestVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// Search response +type SearchResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of search object response message. 
+ Body *SearchResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries response meta information. Header data is used only to regulate + // message transport and does not affect request execution. + MetaHeader *session.ResponseMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries response verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. + VerifyHeader *session.ResponseVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *SearchResponse) Reset() { + *x = SearchResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SearchResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SearchResponse) ProtoMessage() {} + +func (x *SearchResponse) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SearchResponse.ProtoReflect.Descriptor instead. +func (*SearchResponse) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{10} +} + +func (x *SearchResponse) GetBody() *SearchResponse_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *SearchResponse) GetMetaHeader() *session.ResponseMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *SearchResponse) GetVerifyHeader() *session.ResponseVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// Object payload range. 
Ranges of zero length SHOULD be considered as invalid. +type Range struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Offset of the range from the object payload start + Offset uint64 `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"` + // Length in bytes of the object payload range + Length uint64 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"` +} + +func (x *Range) Reset() { + *x = Range{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Range) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Range) ProtoMessage() {} + +func (x *Range) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Range.ProtoReflect.Descriptor instead. +func (*Range) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{11} +} + +func (x *Range) GetOffset() uint64 { + if x != nil { + return x.Offset + } + return 0 +} + +func (x *Range) GetLength() uint64 { + if x != nil { + return x.Length + } + return 0 +} + +// Request part of object's payload +type GetRangeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of get range object request message. + Body *GetRangeRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries request meta information. Header data is used only to regulate + // message transport and does not affect request execution. 
+ MetaHeader *session.RequestMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries request verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. + VerifyHeader *session.RequestVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *GetRangeRequest) Reset() { + *x = GetRangeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetRangeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetRangeRequest) ProtoMessage() {} + +func (x *GetRangeRequest) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetRangeRequest.ProtoReflect.Descriptor instead. +func (*GetRangeRequest) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{12} +} + +func (x *GetRangeRequest) GetBody() *GetRangeRequest_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *GetRangeRequest) GetMetaHeader() *session.RequestMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *GetRangeRequest) GetVerifyHeader() *session.RequestVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// Get part of object's payload +type GetRangeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of get range object response message. 
+ Body *GetRangeResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries response meta information. Header data is used only to regulate + // message transport and does not affect request execution. + MetaHeader *session.ResponseMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries response verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. + VerifyHeader *session.ResponseVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *GetRangeResponse) Reset() { + *x = GetRangeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetRangeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetRangeResponse) ProtoMessage() {} + +func (x *GetRangeResponse) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetRangeResponse.ProtoReflect.Descriptor instead. 
+func (*GetRangeResponse) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{13} +} + +func (x *GetRangeResponse) GetBody() *GetRangeResponse_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *GetRangeResponse) GetMetaHeader() *session.ResponseMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *GetRangeResponse) GetVerifyHeader() *session.ResponseVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// Get hash of object's payload part +type GetRangeHashRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of get range hash object request message. + Body *GetRangeHashRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries request meta information. Header data is used only to regulate + // message transport and does not affect request execution. + MetaHeader *session.RequestMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries request verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. 
+ VerifyHeader *session.RequestVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *GetRangeHashRequest) Reset() { + *x = GetRangeHashRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetRangeHashRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetRangeHashRequest) ProtoMessage() {} + +func (x *GetRangeHashRequest) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetRangeHashRequest.ProtoReflect.Descriptor instead. +func (*GetRangeHashRequest) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{14} +} + +func (x *GetRangeHashRequest) GetBody() *GetRangeHashRequest_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *GetRangeHashRequest) GetMetaHeader() *session.RequestMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *GetRangeHashRequest) GetVerifyHeader() *session.RequestVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// Get hash of object's payload part +type GetRangeHashResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of get range hash object response message. + Body *GetRangeHashResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries response meta information. Header data is used only to regulate + // message transport and does not affect request execution. 
+ MetaHeader *session.ResponseMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries response verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. + VerifyHeader *session.ResponseVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *GetRangeHashResponse) Reset() { + *x = GetRangeHashResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetRangeHashResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetRangeHashResponse) ProtoMessage() {} + +func (x *GetRangeHashResponse) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetRangeHashResponse.ProtoReflect.Descriptor instead. +func (*GetRangeHashResponse) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{15} +} + +func (x *GetRangeHashResponse) GetBody() *GetRangeHashResponse_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *GetRangeHashResponse) GetMetaHeader() *session.ResponseMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *GetRangeHashResponse) GetVerifyHeader() *session.ResponseVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// Replicate RPC request +type ReplicateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Object to be replicated. 
+ Object *Object `protobuf:"bytes,1,opt,name=object,proto3" json:"object,omitempty"` + // Signature of all other request fields serialized in Protocol Buffers v3 + // format in ascending order of fields. + Signature *refs.Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` +} + +func (x *ReplicateRequest) Reset() { + *x = ReplicateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReplicateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReplicateRequest) ProtoMessage() {} + +func (x *ReplicateRequest) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReplicateRequest.ProtoReflect.Descriptor instead. +func (*ReplicateRequest) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{16} +} + +func (x *ReplicateRequest) GetObject() *Object { + if x != nil { + return x.Object + } + return nil +} + +func (x *ReplicateRequest) GetSignature() *refs.Signature { + if x != nil { + return x.Signature + } + return nil +} + +// Replicate RPC response +type ReplicateResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Operation execution status with one of the enumerated codes. 
+ Status *status.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` +} + +func (x *ReplicateResponse) Reset() { + *x = ReplicateResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReplicateResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReplicateResponse) ProtoMessage() {} + +func (x *ReplicateResponse) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReplicateResponse.ProtoReflect.Descriptor instead. +func (*ReplicateResponse) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{17} +} + +func (x *ReplicateResponse) GetStatus() *status.Status { + if x != nil { + return x.Status + } + return nil +} + +// GET Object request body +type GetRequest_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Address of the requested object + Address *refs.Address `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // If `raw` flag is set, request will work only with objects that are + // physically stored on the peer node + Raw bool `protobuf:"varint,2,opt,name=raw,proto3" json:"raw,omitempty"` +} + +func (x *GetRequest_Body) Reset() { + *x = GetRequest_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetRequest_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetRequest_Body) ProtoMessage() {} + +func (x 
*GetRequest_Body) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetRequest_Body.ProtoReflect.Descriptor instead. +func (*GetRequest_Body) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *GetRequest_Body) GetAddress() *refs.Address { + if x != nil { + return x.Address + } + return nil +} + +func (x *GetRequest_Body) GetRaw() bool { + if x != nil { + return x.Raw + } + return false +} + +// GET Object Response body +type GetResponse_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Single message in the response stream. + // + // Types that are assignable to ObjectPart: + // + // *GetResponse_Body_Init_ + // *GetResponse_Body_Chunk + // *GetResponse_Body_SplitInfo + ObjectPart isGetResponse_Body_ObjectPart `protobuf_oneof:"object_part"` +} + +func (x *GetResponse_Body) Reset() { + *x = GetResponse_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetResponse_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetResponse_Body) ProtoMessage() {} + +func (x *GetResponse_Body) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetResponse_Body.ProtoReflect.Descriptor instead. 
+func (*GetResponse_Body) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{1, 0} +} + +func (m *GetResponse_Body) GetObjectPart() isGetResponse_Body_ObjectPart { + if m != nil { + return m.ObjectPart + } + return nil +} + +func (x *GetResponse_Body) GetInit() *GetResponse_Body_Init { + if x, ok := x.GetObjectPart().(*GetResponse_Body_Init_); ok { + return x.Init + } + return nil +} + +func (x *GetResponse_Body) GetChunk() []byte { + if x, ok := x.GetObjectPart().(*GetResponse_Body_Chunk); ok { + return x.Chunk + } + return nil +} + +func (x *GetResponse_Body) GetSplitInfo() *SplitInfo { + if x, ok := x.GetObjectPart().(*GetResponse_Body_SplitInfo); ok { + return x.SplitInfo + } + return nil +} + +type isGetResponse_Body_ObjectPart interface { + isGetResponse_Body_ObjectPart() +} + +type GetResponse_Body_Init_ struct { + // Initial part of the object stream + Init *GetResponse_Body_Init `protobuf:"bytes,1,opt,name=init,proto3,oneof"` +} + +type GetResponse_Body_Chunk struct { + // Chunked object payload + Chunk []byte `protobuf:"bytes,2,opt,name=chunk,proto3,oneof"` +} + +type GetResponse_Body_SplitInfo struct { + // Meta information of split hierarchy for object assembly. + SplitInfo *SplitInfo `protobuf:"bytes,3,opt,name=split_info,json=splitInfo,proto3,oneof"` +} + +func (*GetResponse_Body_Init_) isGetResponse_Body_ObjectPart() {} + +func (*GetResponse_Body_Chunk) isGetResponse_Body_ObjectPart() {} + +func (*GetResponse_Body_SplitInfo) isGetResponse_Body_ObjectPart() {} + +// Initial part of the `Object` structure stream. Technically it's a +// set of all `Object` structure's fields except `payload`. +type GetResponse_Body_Init struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Object's unique identifier. 
+ ObjectId *refs.ObjectID `protobuf:"bytes,1,opt,name=object_id,json=objectId,proto3" json:"object_id,omitempty"` + // Signed `ObjectID` + Signature *refs.Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` + // Object metadata headers + Header *Header `protobuf:"bytes,3,opt,name=header,proto3" json:"header,omitempty"` +} + +func (x *GetResponse_Body_Init) Reset() { + *x = GetResponse_Body_Init{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetResponse_Body_Init) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetResponse_Body_Init) ProtoMessage() {} + +func (x *GetResponse_Body_Init) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetResponse_Body_Init.ProtoReflect.Descriptor instead. +func (*GetResponse_Body_Init) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{1, 0, 0} +} + +func (x *GetResponse_Body_Init) GetObjectId() *refs.ObjectID { + if x != nil { + return x.ObjectId + } + return nil +} + +func (x *GetResponse_Body_Init) GetSignature() *refs.Signature { + if x != nil { + return x.Signature + } + return nil +} + +func (x *GetResponse_Body_Init) GetHeader() *Header { + if x != nil { + return x.Header + } + return nil +} + +// PUT request body +type PutRequest_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Single message in the request stream. 
+ // + // Types that are assignable to ObjectPart: + // + // *PutRequest_Body_Init_ + // *PutRequest_Body_Chunk + ObjectPart isPutRequest_Body_ObjectPart `protobuf_oneof:"object_part"` +} + +func (x *PutRequest_Body) Reset() { + *x = PutRequest_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PutRequest_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PutRequest_Body) ProtoMessage() {} + +func (x *PutRequest_Body) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PutRequest_Body.ProtoReflect.Descriptor instead. +func (*PutRequest_Body) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{2, 0} +} + +func (m *PutRequest_Body) GetObjectPart() isPutRequest_Body_ObjectPart { + if m != nil { + return m.ObjectPart + } + return nil +} + +func (x *PutRequest_Body) GetInit() *PutRequest_Body_Init { + if x, ok := x.GetObjectPart().(*PutRequest_Body_Init_); ok { + return x.Init + } + return nil +} + +func (x *PutRequest_Body) GetChunk() []byte { + if x, ok := x.GetObjectPart().(*PutRequest_Body_Chunk); ok { + return x.Chunk + } + return nil +} + +type isPutRequest_Body_ObjectPart interface { + isPutRequest_Body_ObjectPart() +} + +type PutRequest_Body_Init_ struct { + // Initial part of the object stream + Init *PutRequest_Body_Init `protobuf:"bytes,1,opt,name=init,proto3,oneof"` +} + +type PutRequest_Body_Chunk struct { + // Chunked object payload + Chunk []byte `protobuf:"bytes,2,opt,name=chunk,proto3,oneof"` +} + +func (*PutRequest_Body_Init_) isPutRequest_Body_ObjectPart() {} + +func 
(*PutRequest_Body_Chunk) isPutRequest_Body_ObjectPart() {} + +// Newly created object structure parameters. If some optional parameters +// are not set, they will be calculated by a peer node. +type PutRequest_Body_Init struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // ObjectID if available. + ObjectId *refs.ObjectID `protobuf:"bytes,1,opt,name=object_id,json=objectId,proto3" json:"object_id,omitempty"` + // Object signature if available + Signature *refs.Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` + // Object's Header. The maximum length is 16KB. The only exclusion are + // replication requests, i.e. requests sent by container nodes with + // 'meta_header.ttl=1': for such cases the limit is 4MB. + Header *Header `protobuf:"bytes,3,opt,name=header,proto3" json:"header,omitempty"` + // Number of the object copies to store within the RPC call. By default + // object is processed according to the container's placement policy. + CopiesNumber uint32 `protobuf:"varint,4,opt,name=copies_number,json=copiesNumber,proto3" json:"copies_number,omitempty"` +} + +func (x *PutRequest_Body_Init) Reset() { + *x = PutRequest_Body_Init{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PutRequest_Body_Init) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PutRequest_Body_Init) ProtoMessage() {} + +func (x *PutRequest_Body_Init) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PutRequest_Body_Init.ProtoReflect.Descriptor instead. 
+func (*PutRequest_Body_Init) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{2, 0, 0} +} + +func (x *PutRequest_Body_Init) GetObjectId() *refs.ObjectID { + if x != nil { + return x.ObjectId + } + return nil +} + +func (x *PutRequest_Body_Init) GetSignature() *refs.Signature { + if x != nil { + return x.Signature + } + return nil +} + +func (x *PutRequest_Body_Init) GetHeader() *Header { + if x != nil { + return x.Header + } + return nil +} + +func (x *PutRequest_Body_Init) GetCopiesNumber() uint32 { + if x != nil { + return x.CopiesNumber + } + return 0 +} + +// PUT Object response body +type PutResponse_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifier of the saved object + ObjectId *refs.ObjectID `protobuf:"bytes,1,opt,name=object_id,json=objectId,proto3" json:"object_id,omitempty"` +} + +func (x *PutResponse_Body) Reset() { + *x = PutResponse_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PutResponse_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PutResponse_Body) ProtoMessage() {} + +func (x *PutResponse_Body) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PutResponse_Body.ProtoReflect.Descriptor instead. 
+func (*PutResponse_Body) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *PutResponse_Body) GetObjectId() *refs.ObjectID { + if x != nil { + return x.ObjectId + } + return nil +} + +// Object DELETE request body +type DeleteRequest_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Address of the object to be deleted + Address *refs.Address `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` +} + +func (x *DeleteRequest_Body) Reset() { + *x = DeleteRequest_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteRequest_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteRequest_Body) ProtoMessage() {} + +func (x *DeleteRequest_Body) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteRequest_Body.ProtoReflect.Descriptor instead. +func (*DeleteRequest_Body) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{4, 0} +} + +func (x *DeleteRequest_Body) GetAddress() *refs.Address { + if x != nil { + return x.Address + } + return nil +} + +// Object DELETE Response has an empty body. 
+type DeleteResponse_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Address of the tombstone created for the deleted object + Tombstone *refs.Address `protobuf:"bytes,1,opt,name=tombstone,proto3" json:"tombstone,omitempty"` +} + +func (x *DeleteResponse_Body) Reset() { + *x = DeleteResponse_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteResponse_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteResponse_Body) ProtoMessage() {} + +func (x *DeleteResponse_Body) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteResponse_Body.ProtoReflect.Descriptor instead. 
+func (*DeleteResponse_Body) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{5, 0} +} + +func (x *DeleteResponse_Body) GetTombstone() *refs.Address { + if x != nil { + return x.Tombstone + } + return nil +} + +// Object HEAD request body +type HeadRequest_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Address of the object with the requested Header + Address *refs.Address `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // Return only minimal header subset + MainOnly bool `protobuf:"varint,2,opt,name=main_only,json=mainOnly,proto3" json:"main_only,omitempty"` + // If `raw` flag is set, request will work only with objects that are + // physically stored on the peer node + Raw bool `protobuf:"varint,3,opt,name=raw,proto3" json:"raw,omitempty"` +} + +func (x *HeadRequest_Body) Reset() { + *x = HeadRequest_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeadRequest_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeadRequest_Body) ProtoMessage() {} + +func (x *HeadRequest_Body) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeadRequest_Body.ProtoReflect.Descriptor instead. 
+func (*HeadRequest_Body) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *HeadRequest_Body) GetAddress() *refs.Address { + if x != nil { + return x.Address + } + return nil +} + +func (x *HeadRequest_Body) GetMainOnly() bool { + if x != nil { + return x.MainOnly + } + return false +} + +func (x *HeadRequest_Body) GetRaw() bool { + if x != nil { + return x.Raw + } + return false +} + +// Object HEAD response body +type HeadResponse_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Requested object header, it's part or meta information about split + // object. + // + // Types that are assignable to Head: + // + // *HeadResponse_Body_Header + // *HeadResponse_Body_ShortHeader + // *HeadResponse_Body_SplitInfo + Head isHeadResponse_Body_Head `protobuf_oneof:"head"` +} + +func (x *HeadResponse_Body) Reset() { + *x = HeadResponse_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeadResponse_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeadResponse_Body) ProtoMessage() {} + +func (x *HeadResponse_Body) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeadResponse_Body.ProtoReflect.Descriptor instead. 
+func (*HeadResponse_Body) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{8, 0} +} + +func (m *HeadResponse_Body) GetHead() isHeadResponse_Body_Head { + if m != nil { + return m.Head + } + return nil +} + +func (x *HeadResponse_Body) GetHeader() *HeaderWithSignature { + if x, ok := x.GetHead().(*HeadResponse_Body_Header); ok { + return x.Header + } + return nil +} + +func (x *HeadResponse_Body) GetShortHeader() *ShortHeader { + if x, ok := x.GetHead().(*HeadResponse_Body_ShortHeader); ok { + return x.ShortHeader + } + return nil +} + +func (x *HeadResponse_Body) GetSplitInfo() *SplitInfo { + if x, ok := x.GetHead().(*HeadResponse_Body_SplitInfo); ok { + return x.SplitInfo + } + return nil +} + +type isHeadResponse_Body_Head interface { + isHeadResponse_Body_Head() +} + +type HeadResponse_Body_Header struct { + // Full object's `Header` with `ObjectID` signature + Header *HeaderWithSignature `protobuf:"bytes,1,opt,name=header,proto3,oneof"` +} + +type HeadResponse_Body_ShortHeader struct { + // Short object header + ShortHeader *ShortHeader `protobuf:"bytes,2,opt,name=short_header,json=shortHeader,proto3,oneof"` +} + +type HeadResponse_Body_SplitInfo struct { + // Meta information of split hierarchy. 
+ SplitInfo *SplitInfo `protobuf:"bytes,3,opt,name=split_info,json=splitInfo,proto3,oneof"` +} + +func (*HeadResponse_Body_Header) isHeadResponse_Body_Head() {} + +func (*HeadResponse_Body_ShortHeader) isHeadResponse_Body_Head() {} + +func (*HeadResponse_Body_SplitInfo) isHeadResponse_Body_Head() {} + +// Object Search request body +type SearchRequest_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Container identifier were to search + ContainerId *refs.ContainerID `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + // Version of the Query Language used + Version uint32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + // List of search expressions + Filters []*SearchRequest_Body_Filter `protobuf:"bytes,3,rep,name=filters,proto3" json:"filters,omitempty"` +} + +func (x *SearchRequest_Body) Reset() { + *x = SearchRequest_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SearchRequest_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SearchRequest_Body) ProtoMessage() {} + +func (x *SearchRequest_Body) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SearchRequest_Body.ProtoReflect.Descriptor instead. 
+func (*SearchRequest_Body) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{9, 0} +} + +func (x *SearchRequest_Body) GetContainerId() *refs.ContainerID { + if x != nil { + return x.ContainerId + } + return nil +} + +func (x *SearchRequest_Body) GetVersion() uint32 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *SearchRequest_Body) GetFilters() []*SearchRequest_Body_Filter { + if x != nil { + return x.Filters + } + return nil +} + +// Filter structure checks if the object header field or the attribute content +// matches a value. +// +// If no filters are set, search request will return all objects of the +// container, including Regular object, Tombstones and Storage Group +// objects. Most human users expect to get only object they can directly +// work with. In that case, `$Object:ROOT` filter should be used. +// +// If `match_type` field is numerical, both `value` field and object +// attribute MUST be base-10 integers. +// +// By default `key` field refers to the corresponding object's `Attribute`. +// Some Object's header fields can also be accessed by adding `$Object:` +// prefix to the name. 
Here is the list of fields available via this prefix: +// +// - $Object:version \ +// version +// - $Object:objectID \ +// object_id +// - $Object:containerID \ +// container_id +// - $Object:ownerID \ +// owner_id +// - $Object:creationEpoch \ +// creation_epoch +// - $Object:payloadLength \ +// payload_length +// - $Object:payloadHash \ +// payload_hash +// - $Object:objectType \ +// object_type +// - $Object:homomorphicHash \ +// homomorphic_hash +// - $Object:split.parent \ +// object_id of parent +// - $Object:split.splitID \ +// 16 byte UUIDv4 used to identify the split object hierarchy parts +// +// There are some well-known filter aliases to match objects by certain +// properties: +// +// - $Object:ROOT \ +// Returns only `REGULAR` type objects that are not split or that are the top +// level root objects in a split hierarchy. This includes objects not +// present physically, like large objects split into smaller objects +// without a separate top-level root object. Objects of other types like +// StorageGroups and Tombstones will not be shown. This filter may be +// useful for listing objects like `ls` command of some virtual file +// system. This filter is activated if the `key` exists, disregarding the +// value and matcher type. +// - $Object:PHY \ +// Returns only objects physically stored in the system. This filter is +// activated if the `key` exists, disregarding the value and matcher type. +// +// Note: using filters with a key with prefix `$Object:` and match type +// `NOT_PRESENT `is not recommended since this is not a cross-version approach. +// Behavior when processing this kind of filters is undefined. 
+type SearchRequest_Body_Filter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Match type to use + MatchType MatchType `protobuf:"varint,1,opt,name=match_type,json=matchType,proto3,enum=neo.fs.v2.object.MatchType" json:"match_type,omitempty"` + // Attribute or Header fields to match + Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + // Value to match + Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *SearchRequest_Body_Filter) Reset() { + *x = SearchRequest_Body_Filter{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SearchRequest_Body_Filter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SearchRequest_Body_Filter) ProtoMessage() {} + +func (x *SearchRequest_Body_Filter) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SearchRequest_Body_Filter.ProtoReflect.Descriptor instead. 
+func (*SearchRequest_Body_Filter) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{9, 0, 0} +} + +func (x *SearchRequest_Body_Filter) GetMatchType() MatchType { + if x != nil { + return x.MatchType + } + return MatchType_MATCH_TYPE_UNSPECIFIED +} + +func (x *SearchRequest_Body_Filter) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *SearchRequest_Body_Filter) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +// Object Search response body +type SearchResponse_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // List of `ObjectID`s that match the search query + IdList []*refs.ObjectID `protobuf:"bytes,1,rep,name=id_list,json=idList,proto3" json:"id_list,omitempty"` +} + +func (x *SearchResponse_Body) Reset() { + *x = SearchResponse_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SearchResponse_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SearchResponse_Body) ProtoMessage() {} + +func (x *SearchResponse_Body) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SearchResponse_Body.ProtoReflect.Descriptor instead. 
+func (*SearchResponse_Body) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{10, 0} +} + +func (x *SearchResponse_Body) GetIdList() []*refs.ObjectID { + if x != nil { + return x.IdList + } + return nil +} + +// Byte range of object's payload request body +type GetRangeRequest_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Address of the object containing the requested payload range + Address *refs.Address `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // Requested payload range + Range *Range `protobuf:"bytes,2,opt,name=range,proto3" json:"range,omitempty"` + // If `raw` flag is set, request will work only with objects that are + // physically stored on the peer node. + Raw bool `protobuf:"varint,3,opt,name=raw,proto3" json:"raw,omitempty"` +} + +func (x *GetRangeRequest_Body) Reset() { + *x = GetRangeRequest_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetRangeRequest_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetRangeRequest_Body) ProtoMessage() {} + +func (x *GetRangeRequest_Body) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetRangeRequest_Body.ProtoReflect.Descriptor instead. 
+func (*GetRangeRequest_Body) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{12, 0} +} + +func (x *GetRangeRequest_Body) GetAddress() *refs.Address { + if x != nil { + return x.Address + } + return nil +} + +func (x *GetRangeRequest_Body) GetRange() *Range { + if x != nil { + return x.Range + } + return nil +} + +func (x *GetRangeRequest_Body) GetRaw() bool { + if x != nil { + return x.Raw + } + return false +} + +// Get Range response body uses streams to transfer the response. Because +// object payload considered a byte sequence, there is no need to have some +// initial preamble message. The requested byte range is sent as a series +// chunks. +type GetRangeResponse_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Requested object range or meta information about split object. + // + // Types that are assignable to RangePart: + // + // *GetRangeResponse_Body_Chunk + // *GetRangeResponse_Body_SplitInfo + RangePart isGetRangeResponse_Body_RangePart `protobuf_oneof:"range_part"` +} + +func (x *GetRangeResponse_Body) Reset() { + *x = GetRangeResponse_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetRangeResponse_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetRangeResponse_Body) ProtoMessage() {} + +func (x *GetRangeResponse_Body) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetRangeResponse_Body.ProtoReflect.Descriptor instead. 
+func (*GetRangeResponse_Body) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{13, 0} +} + +func (m *GetRangeResponse_Body) GetRangePart() isGetRangeResponse_Body_RangePart { + if m != nil { + return m.RangePart + } + return nil +} + +func (x *GetRangeResponse_Body) GetChunk() []byte { + if x, ok := x.GetRangePart().(*GetRangeResponse_Body_Chunk); ok { + return x.Chunk + } + return nil +} + +func (x *GetRangeResponse_Body) GetSplitInfo() *SplitInfo { + if x, ok := x.GetRangePart().(*GetRangeResponse_Body_SplitInfo); ok { + return x.SplitInfo + } + return nil +} + +type isGetRangeResponse_Body_RangePart interface { + isGetRangeResponse_Body_RangePart() +} + +type GetRangeResponse_Body_Chunk struct { + // Chunked object payload's range. + Chunk []byte `protobuf:"bytes,1,opt,name=chunk,proto3,oneof"` +} + +type GetRangeResponse_Body_SplitInfo struct { + // Meta information of split hierarchy. + SplitInfo *SplitInfo `protobuf:"bytes,2,opt,name=split_info,json=splitInfo,proto3,oneof"` +} + +func (*GetRangeResponse_Body_Chunk) isGetRangeResponse_Body_RangePart() {} + +func (*GetRangeResponse_Body_SplitInfo) isGetRangeResponse_Body_RangePart() {} + +// Get hash of object's payload part request body. 
+type GetRangeHashRequest_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Address of the object that containing the requested payload range + Address *refs.Address `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // List of object's payload ranges to calculate homomorphic hash + Ranges []*Range `protobuf:"bytes,2,rep,name=ranges,proto3" json:"ranges,omitempty"` + // Binary salt to XOR object's payload ranges before hash calculation + Salt []byte `protobuf:"bytes,3,opt,name=salt,proto3" json:"salt,omitempty"` + // Checksum algorithm type + Type refs.ChecksumType `protobuf:"varint,4,opt,name=type,proto3,enum=neo.fs.v2.refs.ChecksumType" json:"type,omitempty"` +} + +func (x *GetRangeHashRequest_Body) Reset() { + *x = GetRangeHashRequest_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetRangeHashRequest_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetRangeHashRequest_Body) ProtoMessage() {} + +func (x *GetRangeHashRequest_Body) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetRangeHashRequest_Body.ProtoReflect.Descriptor instead. 
+func (*GetRangeHashRequest_Body) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{14, 0} +} + +func (x *GetRangeHashRequest_Body) GetAddress() *refs.Address { + if x != nil { + return x.Address + } + return nil +} + +func (x *GetRangeHashRequest_Body) GetRanges() []*Range { + if x != nil { + return x.Ranges + } + return nil +} + +func (x *GetRangeHashRequest_Body) GetSalt() []byte { + if x != nil { + return x.Salt + } + return nil +} + +func (x *GetRangeHashRequest_Body) GetType() refs.ChecksumType { + if x != nil { + return x.Type + } + return refs.ChecksumType(0) +} + +// Get hash of object's payload part response body. +type GetRangeHashResponse_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Checksum algorithm type + Type refs.ChecksumType `protobuf:"varint,1,opt,name=type,proto3,enum=neo.fs.v2.refs.ChecksumType" json:"type,omitempty"` + // List of range hashes in a binary format + HashList [][]byte `protobuf:"bytes,2,rep,name=hash_list,json=hashList,proto3" json:"hash_list,omitempty"` +} + +func (x *GetRangeHashResponse_Body) Reset() { + *x = GetRangeHashResponse_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_service_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetRangeHashResponse_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetRangeHashResponse_Body) ProtoMessage() {} + +func (x *GetRangeHashResponse_Body) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_service_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetRangeHashResponse_Body.ProtoReflect.Descriptor instead. 
+func (*GetRangeHashResponse_Body) Descriptor() ([]byte, []int) { + return file_object_grpc_service_proto_rawDescGZIP(), []int{15, 0} +} + +func (x *GetRangeHashResponse_Body) GetType() refs.ChecksumType { + if x != nil { + return x.Type + } + return refs.ChecksumType(0) +} + +func (x *GetRangeHashResponse_Body) GetHashList() [][]byte { + if x != nil { + return x.HashList + } + return nil +} + +var File_object_grpc_service_proto protoreflect.FileDescriptor + +var file_object_grpc_service_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x6e, 0x65, 0x6f, + 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x1a, 0x17, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x72, 0x65, 0x66, 0x73, 0x2f, 0x67, 0x72, 0x70, + 0x63, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x73, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2f, + 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0xaa, 0x02, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x35, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, + 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, + 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x65, + 0x6f, 
0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x51, 0x0a, + 0x0d, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x1a, 0x4b, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x31, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, + 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x72, + 0x61, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x72, 0x61, 0x77, 0x22, 0xb9, 0x04, + 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, + 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6e, 0x65, + 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x47, + 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, + 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x46, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6e, 0x65, 0x6f, + 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 
0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x52, 0x0a, + 0x0d, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x1a, 0xd5, 0x02, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x3d, 0x0a, 0x04, 0x69, 0x6e, + 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, + 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x2e, 0x49, 0x6e, 0x69, + 0x74, 0x48, 0x00, 0x52, 0x04, 0x69, 0x6e, 0x69, 0x74, 0x12, 0x16, 0x0a, 0x05, 0x63, 0x68, 0x75, + 0x6e, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x05, 0x63, 0x68, 0x75, 0x6e, + 0x6b, 0x12, 0x3c, 0x0a, 0x0a, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x53, 0x70, 0x6c, 0x69, 0x74, 0x49, 0x6e, + 0x66, 0x6f, 0x48, 0x00, 0x52, 0x09, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x1a, + 0xa8, 0x01, 0x0a, 0x04, 0x49, 0x6e, 0x69, 0x74, 0x12, 0x35, 0x0a, 0x09, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, + 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x49, 0x44, 0x52, 0x08, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 
0x49, 0x64, 0x12, + 0x37, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, + 0x65, 0x66, 0x73, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x30, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, + 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x22, 0x9b, 0x04, 0x0a, 0x0a, 0x50, 0x75, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, + 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x50, 0x75, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, + 0x45, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x51, 0x0a, 0x0d, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, + 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 
0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, + 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0xbb, 0x02, 0x0a, 0x04, 0x42, 0x6f, + 0x64, 0x79, 0x12, 0x3c, 0x0a, 0x04, 0x69, 0x6e, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x26, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x2e, 0x50, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, + 0x6f, 0x64, 0x79, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x48, 0x00, 0x52, 0x04, 0x69, 0x6e, 0x69, 0x74, + 0x12, 0x16, 0x0a, 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, + 0x00, 0x52, 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x1a, 0xcd, 0x01, 0x0a, 0x04, 0x49, 0x6e, 0x69, + 0x74, 0x12, 0x35, 0x0a, 0x09, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x44, 0x52, 0x08, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x37, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6e, 0x65, + 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x53, 0x69, 0x67, + 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x12, 0x30, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x06, 0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x70, 0x69, 0x65, 0x73, 0x5f, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x63, 0x6f, 0x70, 0x69, + 0x65, 0x73, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x42, 
0x0d, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x22, 0xa0, 0x02, 0x0a, 0x0b, 0x50, 0x75, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x50, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, + 0x46, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x6d, 0x65, 0x74, + 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x52, 0x0a, 0x0d, 0x76, 0x65, 0x72, 0x69, 0x66, + 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, + 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, + 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x3d, 0x0a, 0x04, 0x42, + 0x6f, 0x64, 0x79, 0x12, 0x35, 0x0a, 0x09, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, + 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x44, + 0x52, 0x08, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x22, 0x9e, 0x02, 0x0a, 0x0d, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 
0x04, + 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x65, 0x6f, + 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, + 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x65, + 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x51, 0x0a, + 0x0d, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x1a, 0x39, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x31, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, + 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0xa6, 0x02, 0x0a, 0x0e, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, + 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6e, + 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, + 0x44, 0x65, 0x6c, 0x65, 0x74, 
0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, + 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x46, 0x0a, 0x0b, 0x6d, 0x65, 0x74, + 0x61, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x12, 0x52, 0x0a, 0x0d, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, + 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x3d, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x35, 0x0a, + 0x09, 0x74, 0x6f, 0x6d, 0x62, 0x73, 0x74, 0x6f, 0x6e, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, + 0x73, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x09, 0x74, 0x6f, 0x6d, 0x62, 0x73, + 0x74, 0x6f, 0x6e, 0x65, 0x22, 0xc9, 0x02, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, 0x0b, + 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 
0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, + 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x12, 0x51, 0x0a, 0x0d, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6e, 0x65, 0x6f, + 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x68, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x31, + 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, + 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x69, 0x6e, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x72, 0x61, 0x77, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x72, 0x61, 0x77, + 0x22, 0x80, 0x01, 0x0a, 0x13, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x57, 0x69, 0x74, 0x68, 0x53, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x30, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, + 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x09, 0x73, 0x69, + 0x67, 
0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x53, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x22, 0xb7, 0x03, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x46, 0x0a, + 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, + 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x52, 0x0a, 0x0d, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6e, + 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, + 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0xd1, 0x01, 0x0a, 0x04, 0x42, 0x6f, + 0x64, 0x79, 0x12, 0x3f, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x48, 
0x65, 0x61, 0x64, 0x65, 0x72, 0x57, 0x69, 0x74, 0x68, + 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x48, 0x00, 0x52, 0x06, 0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0c, 0x73, 0x68, 0x6f, 0x72, 0x74, 0x5f, 0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, + 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x53, 0x68, 0x6f, + 0x72, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x68, 0x6f, 0x72, + 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x3c, 0x0a, 0x0a, 0x73, 0x70, 0x6c, 0x69, 0x74, + 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6e, 0x65, + 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x53, + 0x70, 0x6c, 0x69, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x00, 0x52, 0x09, 0x73, 0x70, 0x6c, 0x69, + 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x06, 0x0a, 0x04, 0x68, 0x65, 0x61, 0x64, 0x22, 0xfb, 0x03, + 0x0a, 0x0d, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x38, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x2e, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, + 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, 0x0b, 0x6d, 0x65, 0x74, + 0x61, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x12, 0x51, 0x0a, 0x0d, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x68, 0x65, 
0x61, 0x64, 0x65, + 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, + 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x1a, 0x95, 0x02, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x3e, 0x0a, 0x0c, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, + 0x65, 0x66, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x52, + 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x45, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, + 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x53, 0x65, 0x61, 0x72, 0x63, + 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x2e, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x1a, 0x6c, 0x0a, + 0x06, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x3a, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x6e, 0x65, + 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 
0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xa2, 0x02, 0x0a, 0x0e, + 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, + 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6e, + 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, + 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, + 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x46, 0x0a, 0x0b, 0x6d, 0x65, 0x74, + 0x61, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x12, 0x52, 0x0a, 0x0d, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, + 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x39, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x31, 0x0a, + 0x07, 0x69, 0x64, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, + 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x44, 0x52, 0x06, 0x69, 0x64, 0x4c, 0x69, 0x73, 0x74, + 0x22, 0x37, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 
0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, + 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, + 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x22, 0xe3, 0x02, 0x0a, 0x0f, 0x47, 0x65, + 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, + 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6e, 0x65, + 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x47, + 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, + 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, 0x0b, 0x6d, 0x65, 0x74, + 0x61, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x12, 0x51, 0x0a, 0x0d, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, + 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x1a, 0x7a, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x31, 0x0a, 0x07, 0x61, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6e, + 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x41, 
0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2d, + 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x10, 0x0a, + 0x03, 0x72, 0x61, 0x77, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x72, 0x61, 0x77, 0x22, + 0xd7, 0x02, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, + 0x79, 0x12, 0x46, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, + 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x6d, + 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x52, 0x0a, 0x0d, 0x76, 0x65, 0x72, + 0x69, 0x66, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x65, 0x72, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, + 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x6a, 0x0a, + 0x04, 0x42, 0x6f, 0x64, 0x79, 
0x12, 0x16, 0x0a, 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x12, 0x3c, 0x0a, + 0x0a, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1b, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x53, 0x70, 0x6c, 0x69, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x00, + 0x52, 0x09, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x0c, 0x0a, 0x0a, 0x72, + 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x22, 0xa2, 0x03, 0x0a, 0x13, 0x47, 0x65, + 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x61, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x3e, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2a, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x61, 0x73, 0x68, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, + 0x79, 0x12, 0x45, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, + 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x6d, 0x65, + 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x51, 0x0a, 0x0d, 0x76, 0x65, 0x72, 0x69, + 0x66, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2c, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 
0x65, 0x72, 0x52, 0x0c, 0x76, + 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0xb0, 0x01, 0x0a, 0x04, + 0x42, 0x6f, 0x64, 0x79, 0x12, 0x31, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2f, 0x0a, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, + 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x52, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x61, 0x6c, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x73, 0x61, 0x6c, 0x74, 0x12, 0x30, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x6e, 0x65, 0x6f, + 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xca, + 0x02, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x61, 0x73, 0x68, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x48, 0x61, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, + 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x46, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, + 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 
0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x12, 0x52, 0x0a, 0x0d, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, + 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x1a, 0x55, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x6e, 0x65, 0x6f, + 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, + 0x0a, 0x09, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0c, 0x52, 0x08, 0x68, 0x61, 0x73, 0x68, 0x4c, 0x69, 0x73, 0x74, 0x22, 0x7d, 0x0a, 0x10, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x30, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x12, 0x37, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, + 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 
0x75, 0x72, 0x65, 0x22, 0x45, 0x0a, 0x11, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x30, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x32, 0x88, 0x05, 0x0a, 0x0d, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x44, 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x1c, 0x2e, 0x6e, 0x65, 0x6f, + 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x47, 0x65, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, + 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x44, 0x0a, 0x03, 0x50, 0x75, 0x74, + 0x12, 0x1c, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x2e, 0x50, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, + 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x2e, 0x50, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x12, + 0x4b, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x1f, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, + 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x6e, 0x65, 0x6f, + 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x04, + 0x48, 0x65, 0x61, 0x64, 0x12, 0x1d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 
0x2e, 0x76, 0x32, + 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x06, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x12, 0x1f, 0x2e, + 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x2e, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, + 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x2e, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x30, 0x01, 0x12, 0x53, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x21, + 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x22, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x5d, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x48, 0x61, 0x73, 0x68, 0x12, 0x25, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, + 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x48, 0x61, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, + 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x61, 0x73, 0x68, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x09, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x65, 
0x12, 0x22, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, + 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x56, 0x5a, 0x37, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6e, 0x73, 0x70, 0x63, 0x63, + 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x6e, 0x65, 0x6f, 0x66, 0x73, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x67, + 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x67, 0x72, 0x70, 0x63, + 0x3b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xaa, 0x02, 0x1a, 0x4e, 0x65, 0x6f, 0x2e, 0x46, 0x69, + 0x6c, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x41, 0x50, 0x49, 0x2e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_object_grpc_service_proto_rawDescOnce sync.Once + file_object_grpc_service_proto_rawDescData = file_object_grpc_service_proto_rawDesc +) + +func file_object_grpc_service_proto_rawDescGZIP() []byte { + file_object_grpc_service_proto_rawDescOnce.Do(func() { + file_object_grpc_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_object_grpc_service_proto_rawDescData) + }) + return file_object_grpc_service_proto_rawDescData +} + +var file_object_grpc_service_proto_msgTypes = make([]protoimpl.MessageInfo, 35) +var file_object_grpc_service_proto_goTypes = []interface{}{ + (*GetRequest)(nil), // 0: neo.fs.v2.object.GetRequest + (*GetResponse)(nil), // 1: neo.fs.v2.object.GetResponse + (*PutRequest)(nil), // 2: neo.fs.v2.object.PutRequest + (*PutResponse)(nil), // 3: neo.fs.v2.object.PutResponse + (*DeleteRequest)(nil), // 4: neo.fs.v2.object.DeleteRequest + (*DeleteResponse)(nil), // 5: neo.fs.v2.object.DeleteResponse + 
(*HeadRequest)(nil), // 6: neo.fs.v2.object.HeadRequest + (*HeaderWithSignature)(nil), // 7: neo.fs.v2.object.HeaderWithSignature + (*HeadResponse)(nil), // 8: neo.fs.v2.object.HeadResponse + (*SearchRequest)(nil), // 9: neo.fs.v2.object.SearchRequest + (*SearchResponse)(nil), // 10: neo.fs.v2.object.SearchResponse + (*Range)(nil), // 11: neo.fs.v2.object.Range + (*GetRangeRequest)(nil), // 12: neo.fs.v2.object.GetRangeRequest + (*GetRangeResponse)(nil), // 13: neo.fs.v2.object.GetRangeResponse + (*GetRangeHashRequest)(nil), // 14: neo.fs.v2.object.GetRangeHashRequest + (*GetRangeHashResponse)(nil), // 15: neo.fs.v2.object.GetRangeHashResponse + (*ReplicateRequest)(nil), // 16: neo.fs.v2.object.ReplicateRequest + (*ReplicateResponse)(nil), // 17: neo.fs.v2.object.ReplicateResponse + (*GetRequest_Body)(nil), // 18: neo.fs.v2.object.GetRequest.Body + (*GetResponse_Body)(nil), // 19: neo.fs.v2.object.GetResponse.Body + (*GetResponse_Body_Init)(nil), // 20: neo.fs.v2.object.GetResponse.Body.Init + (*PutRequest_Body)(nil), // 21: neo.fs.v2.object.PutRequest.Body + (*PutRequest_Body_Init)(nil), // 22: neo.fs.v2.object.PutRequest.Body.Init + (*PutResponse_Body)(nil), // 23: neo.fs.v2.object.PutResponse.Body + (*DeleteRequest_Body)(nil), // 24: neo.fs.v2.object.DeleteRequest.Body + (*DeleteResponse_Body)(nil), // 25: neo.fs.v2.object.DeleteResponse.Body + (*HeadRequest_Body)(nil), // 26: neo.fs.v2.object.HeadRequest.Body + (*HeadResponse_Body)(nil), // 27: neo.fs.v2.object.HeadResponse.Body + (*SearchRequest_Body)(nil), // 28: neo.fs.v2.object.SearchRequest.Body + (*SearchRequest_Body_Filter)(nil), // 29: neo.fs.v2.object.SearchRequest.Body.Filter + (*SearchResponse_Body)(nil), // 30: neo.fs.v2.object.SearchResponse.Body + (*GetRangeRequest_Body)(nil), // 31: neo.fs.v2.object.GetRangeRequest.Body + (*GetRangeResponse_Body)(nil), // 32: neo.fs.v2.object.GetRangeResponse.Body + (*GetRangeHashRequest_Body)(nil), // 33: neo.fs.v2.object.GetRangeHashRequest.Body + 
(*GetRangeHashResponse_Body)(nil), // 34: neo.fs.v2.object.GetRangeHashResponse.Body + (*session.RequestMetaHeader)(nil), // 35: neo.fs.v2.session.RequestMetaHeader + (*session.RequestVerificationHeader)(nil), // 36: neo.fs.v2.session.RequestVerificationHeader + (*session.ResponseMetaHeader)(nil), // 37: neo.fs.v2.session.ResponseMetaHeader + (*session.ResponseVerificationHeader)(nil), // 38: neo.fs.v2.session.ResponseVerificationHeader + (*Header)(nil), // 39: neo.fs.v2.object.Header + (*refs.Signature)(nil), // 40: neo.fs.v2.refs.Signature + (*Object)(nil), // 41: neo.fs.v2.object.Object + (*status.Status)(nil), // 42: neo.fs.v2.status.Status + (*refs.Address)(nil), // 43: neo.fs.v2.refs.Address + (*SplitInfo)(nil), // 44: neo.fs.v2.object.SplitInfo + (*refs.ObjectID)(nil), // 45: neo.fs.v2.refs.ObjectID + (*ShortHeader)(nil), // 46: neo.fs.v2.object.ShortHeader + (*refs.ContainerID)(nil), // 47: neo.fs.v2.refs.ContainerID + (MatchType)(0), // 48: neo.fs.v2.object.MatchType + (refs.ChecksumType)(0), // 49: neo.fs.v2.refs.ChecksumType +} +var file_object_grpc_service_proto_depIdxs = []int32{ + 18, // 0: neo.fs.v2.object.GetRequest.body:type_name -> neo.fs.v2.object.GetRequest.Body + 35, // 1: neo.fs.v2.object.GetRequest.meta_header:type_name -> neo.fs.v2.session.RequestMetaHeader + 36, // 2: neo.fs.v2.object.GetRequest.verify_header:type_name -> neo.fs.v2.session.RequestVerificationHeader + 19, // 3: neo.fs.v2.object.GetResponse.body:type_name -> neo.fs.v2.object.GetResponse.Body + 37, // 4: neo.fs.v2.object.GetResponse.meta_header:type_name -> neo.fs.v2.session.ResponseMetaHeader + 38, // 5: neo.fs.v2.object.GetResponse.verify_header:type_name -> neo.fs.v2.session.ResponseVerificationHeader + 21, // 6: neo.fs.v2.object.PutRequest.body:type_name -> neo.fs.v2.object.PutRequest.Body + 35, // 7: neo.fs.v2.object.PutRequest.meta_header:type_name -> neo.fs.v2.session.RequestMetaHeader + 36, // 8: neo.fs.v2.object.PutRequest.verify_header:type_name -> 
neo.fs.v2.session.RequestVerificationHeader + 23, // 9: neo.fs.v2.object.PutResponse.body:type_name -> neo.fs.v2.object.PutResponse.Body + 37, // 10: neo.fs.v2.object.PutResponse.meta_header:type_name -> neo.fs.v2.session.ResponseMetaHeader + 38, // 11: neo.fs.v2.object.PutResponse.verify_header:type_name -> neo.fs.v2.session.ResponseVerificationHeader + 24, // 12: neo.fs.v2.object.DeleteRequest.body:type_name -> neo.fs.v2.object.DeleteRequest.Body + 35, // 13: neo.fs.v2.object.DeleteRequest.meta_header:type_name -> neo.fs.v2.session.RequestMetaHeader + 36, // 14: neo.fs.v2.object.DeleteRequest.verify_header:type_name -> neo.fs.v2.session.RequestVerificationHeader + 25, // 15: neo.fs.v2.object.DeleteResponse.body:type_name -> neo.fs.v2.object.DeleteResponse.Body + 37, // 16: neo.fs.v2.object.DeleteResponse.meta_header:type_name -> neo.fs.v2.session.ResponseMetaHeader + 38, // 17: neo.fs.v2.object.DeleteResponse.verify_header:type_name -> neo.fs.v2.session.ResponseVerificationHeader + 26, // 18: neo.fs.v2.object.HeadRequest.body:type_name -> neo.fs.v2.object.HeadRequest.Body + 35, // 19: neo.fs.v2.object.HeadRequest.meta_header:type_name -> neo.fs.v2.session.RequestMetaHeader + 36, // 20: neo.fs.v2.object.HeadRequest.verify_header:type_name -> neo.fs.v2.session.RequestVerificationHeader + 39, // 21: neo.fs.v2.object.HeaderWithSignature.header:type_name -> neo.fs.v2.object.Header + 40, // 22: neo.fs.v2.object.HeaderWithSignature.signature:type_name -> neo.fs.v2.refs.Signature + 27, // 23: neo.fs.v2.object.HeadResponse.body:type_name -> neo.fs.v2.object.HeadResponse.Body + 37, // 24: neo.fs.v2.object.HeadResponse.meta_header:type_name -> neo.fs.v2.session.ResponseMetaHeader + 38, // 25: neo.fs.v2.object.HeadResponse.verify_header:type_name -> neo.fs.v2.session.ResponseVerificationHeader + 28, // 26: neo.fs.v2.object.SearchRequest.body:type_name -> neo.fs.v2.object.SearchRequest.Body + 35, // 27: neo.fs.v2.object.SearchRequest.meta_header:type_name -> 
neo.fs.v2.session.RequestMetaHeader + 36, // 28: neo.fs.v2.object.SearchRequest.verify_header:type_name -> neo.fs.v2.session.RequestVerificationHeader + 30, // 29: neo.fs.v2.object.SearchResponse.body:type_name -> neo.fs.v2.object.SearchResponse.Body + 37, // 30: neo.fs.v2.object.SearchResponse.meta_header:type_name -> neo.fs.v2.session.ResponseMetaHeader + 38, // 31: neo.fs.v2.object.SearchResponse.verify_header:type_name -> neo.fs.v2.session.ResponseVerificationHeader + 31, // 32: neo.fs.v2.object.GetRangeRequest.body:type_name -> neo.fs.v2.object.GetRangeRequest.Body + 35, // 33: neo.fs.v2.object.GetRangeRequest.meta_header:type_name -> neo.fs.v2.session.RequestMetaHeader + 36, // 34: neo.fs.v2.object.GetRangeRequest.verify_header:type_name -> neo.fs.v2.session.RequestVerificationHeader + 32, // 35: neo.fs.v2.object.GetRangeResponse.body:type_name -> neo.fs.v2.object.GetRangeResponse.Body + 37, // 36: neo.fs.v2.object.GetRangeResponse.meta_header:type_name -> neo.fs.v2.session.ResponseMetaHeader + 38, // 37: neo.fs.v2.object.GetRangeResponse.verify_header:type_name -> neo.fs.v2.session.ResponseVerificationHeader + 33, // 38: neo.fs.v2.object.GetRangeHashRequest.body:type_name -> neo.fs.v2.object.GetRangeHashRequest.Body + 35, // 39: neo.fs.v2.object.GetRangeHashRequest.meta_header:type_name -> neo.fs.v2.session.RequestMetaHeader + 36, // 40: neo.fs.v2.object.GetRangeHashRequest.verify_header:type_name -> neo.fs.v2.session.RequestVerificationHeader + 34, // 41: neo.fs.v2.object.GetRangeHashResponse.body:type_name -> neo.fs.v2.object.GetRangeHashResponse.Body + 37, // 42: neo.fs.v2.object.GetRangeHashResponse.meta_header:type_name -> neo.fs.v2.session.ResponseMetaHeader + 38, // 43: neo.fs.v2.object.GetRangeHashResponse.verify_header:type_name -> neo.fs.v2.session.ResponseVerificationHeader + 41, // 44: neo.fs.v2.object.ReplicateRequest.object:type_name -> neo.fs.v2.object.Object + 40, // 45: neo.fs.v2.object.ReplicateRequest.signature:type_name -> 
neo.fs.v2.refs.Signature + 42, // 46: neo.fs.v2.object.ReplicateResponse.status:type_name -> neo.fs.v2.status.Status + 43, // 47: neo.fs.v2.object.GetRequest.Body.address:type_name -> neo.fs.v2.refs.Address + 20, // 48: neo.fs.v2.object.GetResponse.Body.init:type_name -> neo.fs.v2.object.GetResponse.Body.Init + 44, // 49: neo.fs.v2.object.GetResponse.Body.split_info:type_name -> neo.fs.v2.object.SplitInfo + 45, // 50: neo.fs.v2.object.GetResponse.Body.Init.object_id:type_name -> neo.fs.v2.refs.ObjectID + 40, // 51: neo.fs.v2.object.GetResponse.Body.Init.signature:type_name -> neo.fs.v2.refs.Signature + 39, // 52: neo.fs.v2.object.GetResponse.Body.Init.header:type_name -> neo.fs.v2.object.Header + 22, // 53: neo.fs.v2.object.PutRequest.Body.init:type_name -> neo.fs.v2.object.PutRequest.Body.Init + 45, // 54: neo.fs.v2.object.PutRequest.Body.Init.object_id:type_name -> neo.fs.v2.refs.ObjectID + 40, // 55: neo.fs.v2.object.PutRequest.Body.Init.signature:type_name -> neo.fs.v2.refs.Signature + 39, // 56: neo.fs.v2.object.PutRequest.Body.Init.header:type_name -> neo.fs.v2.object.Header + 45, // 57: neo.fs.v2.object.PutResponse.Body.object_id:type_name -> neo.fs.v2.refs.ObjectID + 43, // 58: neo.fs.v2.object.DeleteRequest.Body.address:type_name -> neo.fs.v2.refs.Address + 43, // 59: neo.fs.v2.object.DeleteResponse.Body.tombstone:type_name -> neo.fs.v2.refs.Address + 43, // 60: neo.fs.v2.object.HeadRequest.Body.address:type_name -> neo.fs.v2.refs.Address + 7, // 61: neo.fs.v2.object.HeadResponse.Body.header:type_name -> neo.fs.v2.object.HeaderWithSignature + 46, // 62: neo.fs.v2.object.HeadResponse.Body.short_header:type_name -> neo.fs.v2.object.ShortHeader + 44, // 63: neo.fs.v2.object.HeadResponse.Body.split_info:type_name -> neo.fs.v2.object.SplitInfo + 47, // 64: neo.fs.v2.object.SearchRequest.Body.container_id:type_name -> neo.fs.v2.refs.ContainerID + 29, // 65: neo.fs.v2.object.SearchRequest.Body.filters:type_name -> neo.fs.v2.object.SearchRequest.Body.Filter + 48, 
// 66: neo.fs.v2.object.SearchRequest.Body.Filter.match_type:type_name -> neo.fs.v2.object.MatchType + 45, // 67: neo.fs.v2.object.SearchResponse.Body.id_list:type_name -> neo.fs.v2.refs.ObjectID + 43, // 68: neo.fs.v2.object.GetRangeRequest.Body.address:type_name -> neo.fs.v2.refs.Address + 11, // 69: neo.fs.v2.object.GetRangeRequest.Body.range:type_name -> neo.fs.v2.object.Range + 44, // 70: neo.fs.v2.object.GetRangeResponse.Body.split_info:type_name -> neo.fs.v2.object.SplitInfo + 43, // 71: neo.fs.v2.object.GetRangeHashRequest.Body.address:type_name -> neo.fs.v2.refs.Address + 11, // 72: neo.fs.v2.object.GetRangeHashRequest.Body.ranges:type_name -> neo.fs.v2.object.Range + 49, // 73: neo.fs.v2.object.GetRangeHashRequest.Body.type:type_name -> neo.fs.v2.refs.ChecksumType + 49, // 74: neo.fs.v2.object.GetRangeHashResponse.Body.type:type_name -> neo.fs.v2.refs.ChecksumType + 0, // 75: neo.fs.v2.object.ObjectService.Get:input_type -> neo.fs.v2.object.GetRequest + 2, // 76: neo.fs.v2.object.ObjectService.Put:input_type -> neo.fs.v2.object.PutRequest + 4, // 77: neo.fs.v2.object.ObjectService.Delete:input_type -> neo.fs.v2.object.DeleteRequest + 6, // 78: neo.fs.v2.object.ObjectService.Head:input_type -> neo.fs.v2.object.HeadRequest + 9, // 79: neo.fs.v2.object.ObjectService.Search:input_type -> neo.fs.v2.object.SearchRequest + 12, // 80: neo.fs.v2.object.ObjectService.GetRange:input_type -> neo.fs.v2.object.GetRangeRequest + 14, // 81: neo.fs.v2.object.ObjectService.GetRangeHash:input_type -> neo.fs.v2.object.GetRangeHashRequest + 16, // 82: neo.fs.v2.object.ObjectService.Replicate:input_type -> neo.fs.v2.object.ReplicateRequest + 1, // 83: neo.fs.v2.object.ObjectService.Get:output_type -> neo.fs.v2.object.GetResponse + 3, // 84: neo.fs.v2.object.ObjectService.Put:output_type -> neo.fs.v2.object.PutResponse + 5, // 85: neo.fs.v2.object.ObjectService.Delete:output_type -> neo.fs.v2.object.DeleteResponse + 8, // 86: neo.fs.v2.object.ObjectService.Head:output_type -> 
neo.fs.v2.object.HeadResponse + 10, // 87: neo.fs.v2.object.ObjectService.Search:output_type -> neo.fs.v2.object.SearchResponse + 13, // 88: neo.fs.v2.object.ObjectService.GetRange:output_type -> neo.fs.v2.object.GetRangeResponse + 15, // 89: neo.fs.v2.object.ObjectService.GetRangeHash:output_type -> neo.fs.v2.object.GetRangeHashResponse + 17, // 90: neo.fs.v2.object.ObjectService.Replicate:output_type -> neo.fs.v2.object.ReplicateResponse + 83, // [83:91] is the sub-list for method output_type + 75, // [75:83] is the sub-list for method input_type + 75, // [75:75] is the sub-list for extension type_name + 75, // [75:75] is the sub-list for extension extendee + 0, // [0:75] is the sub-list for field type_name +} + +func init() { file_object_grpc_service_proto_init() } +func file_object_grpc_service_proto_init() { + if File_object_grpc_service_proto != nil { + return + } + file_object_grpc_types_proto_init() + if !protoimpl.UnsafeEnabled { + file_object_grpc_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PutRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PutResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_object_grpc_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeadRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeaderWithSignature); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeadResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SearchRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SearchResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Range); i { + case 0: + return 
&v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetRangeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetRangeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetRangeHashRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_service_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetRangeHashResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_service_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReplicateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_service_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReplicateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_service_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetRequest_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_object_grpc_service_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetResponse_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_service_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetResponse_Body_Init); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_service_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PutRequest_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_service_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PutRequest_Body_Init); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_service_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PutResponse_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_service_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteRequest_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_service_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteResponse_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_service_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*HeadRequest_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_service_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeadResponse_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_service_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SearchRequest_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_service_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SearchRequest_Body_Filter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_service_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SearchResponse_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_service_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetRangeRequest_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_service_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetRangeResponse_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_service_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetRangeHashRequest_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + 
return &v.unknownFields + default: + return nil + } + } + file_object_grpc_service_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetRangeHashResponse_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_object_grpc_service_proto_msgTypes[19].OneofWrappers = []interface{}{ + (*GetResponse_Body_Init_)(nil), + (*GetResponse_Body_Chunk)(nil), + (*GetResponse_Body_SplitInfo)(nil), + } + file_object_grpc_service_proto_msgTypes[21].OneofWrappers = []interface{}{ + (*PutRequest_Body_Init_)(nil), + (*PutRequest_Body_Chunk)(nil), + } + file_object_grpc_service_proto_msgTypes[27].OneofWrappers = []interface{}{ + (*HeadResponse_Body_Header)(nil), + (*HeadResponse_Body_ShortHeader)(nil), + (*HeadResponse_Body_SplitInfo)(nil), + } + file_object_grpc_service_proto_msgTypes[32].OneofWrappers = []interface{}{ + (*GetRangeResponse_Body_Chunk)(nil), + (*GetRangeResponse_Body_SplitInfo)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_object_grpc_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 35, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_object_grpc_service_proto_goTypes, + DependencyIndexes: file_object_grpc_service_proto_depIdxs, + MessageInfos: file_object_grpc_service_proto_msgTypes, + }.Build() + File_object_grpc_service_proto = out.File + file_object_grpc_service_proto_rawDesc = nil + file_object_grpc_service_proto_goTypes = nil + file_object_grpc_service_proto_depIdxs = nil +} diff --git a/api/object/service_grpc.pb.go b/api/object/service_grpc.pb.go new file mode 100644 index 000000000..bad2e0b15 --- /dev/null +++ b/api/object/service_grpc.pb.go @@ -0,0 +1,900 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.25.1 +// source: object/grpc/service.proto + +package object + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + ObjectService_Get_FullMethodName = "/neo.fs.v2.object.ObjectService/Get" + ObjectService_Put_FullMethodName = "/neo.fs.v2.object.ObjectService/Put" + ObjectService_Delete_FullMethodName = "/neo.fs.v2.object.ObjectService/Delete" + ObjectService_Head_FullMethodName = "/neo.fs.v2.object.ObjectService/Head" + ObjectService_Search_FullMethodName = "/neo.fs.v2.object.ObjectService/Search" + ObjectService_GetRange_FullMethodName = "/neo.fs.v2.object.ObjectService/GetRange" + ObjectService_GetRangeHash_FullMethodName = "/neo.fs.v2.object.ObjectService/GetRangeHash" + ObjectService_Replicate_FullMethodName = "/neo.fs.v2.object.ObjectService/Replicate" +) + +// ObjectServiceClient is the client API for ObjectService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ObjectServiceClient interface { + // Receive full object structure, including Headers and payload. Response uses + // gRPC stream. First response message carries the object with the requested address. + // Chunk messages are parts of the object's payload if it is needed. All + // messages, except the first one, carry payload chunks. The requested object can + // be restored by concatenation of object message payload and all chunks + // keeping the receiving order. 
+ // + // Extended headers can change `Get` behaviour: + // - __NEOFS__NETMAP_EPOCH \ + // Will use the requsted version of Network Map for object placement + // calculation. DEPRECATED: header ignored by servers. + // - __NEOFS__NETMAP_LOOKUP_DEPTH \ + // Will try older versions (starting from `__NEOFS__NETMAP_EPOCH` if specified or + // the latest one otherwise) of Network Map to find an object until the depth + // limit is reached. DEPRECATED: header ignored by servers. + // + // Please refer to detailed `XHeader` description. + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): \ + // object has been successfully read; + // - Common failures (SECTION_FAILURE_COMMON); + // - **ACCESS_DENIED** (2048, SECTION_OBJECT): \ + // read access to the object is denied; + // - **OBJECT_NOT_FOUND** (2049, SECTION_OBJECT): \ + // object not found in container; + // - **OBJECT_ALREADY_REMOVED** (2052, SECTION_OBJECT): \ + // the requested object has been marked as deleted; + // - **CONTAINER_NOT_FOUND** (3072, SECTION_CONTAINER): \ + // object container not found; + // - **TOKEN_EXPIRED** (4097, SECTION_SESSION): \ + // provided session token has expired. + Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (ObjectService_GetClient, error) + // Put the object into container. Request uses gRPC stream. First message + // SHOULD be of PutHeader type. `ContainerID` and `OwnerID` of an object + // SHOULD be set. Session token SHOULD be obtained before `PUT` operation (see + // session package). Chunk messages are considered by server as a part of an + // object payload. All messages, except first one, SHOULD be payload chunks. + // Chunk messages SHOULD be sent in the direct order of fragmentation. + // + // Extended headers can change `Put` behaviour: + // - __NEOFS__NETMAP_EPOCH \ + // Will use the requsted version of Network Map for object placement + // calculation. DEPRECATED: header ignored by servers. 
+ // + // Please refer to detailed `XHeader` description. + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): \ + // object has been successfully saved in the container; + // - Common failures (SECTION_FAILURE_COMMON); + // - **ACCESS_DENIED** (2048, SECTION_OBJECT): \ + // write access to the container is denied; + // - **LOCKED** (2050, SECTION_OBJECT): \ + // placement of an object of type TOMBSTONE that includes at least one locked + // object is prohibited; + // - **LOCK_NON_REGULAR_OBJECT** (2051, SECTION_OBJECT): \ + // placement of an object of type LOCK that includes at least one object of + // type other than REGULAR is prohibited; + // - **CONTAINER_NOT_FOUND** (3072, SECTION_CONTAINER): \ + // object storage container not found; + // - **TOKEN_NOT_FOUND** (4096, SECTION_SESSION): \ + // (for trusted object preparation) session private key does not exist or has + // + // been deleted; + // - **TOKEN_EXPIRED** (4097, SECTION_SESSION): \ + // provided session token has expired. + Put(ctx context.Context, opts ...grpc.CallOption) (ObjectService_PutClient, error) + // Delete the object from a container. There is no immediate removal + // guarantee. Object will be marked for removal and deleted eventually. + // + // Extended headers can change `Delete` behaviour: + // - __NEOFS__NETMAP_EPOCH \ + // Will use the requsted version of Network Map for object placement + // calculation. DEPRECATED: header ignored by servers. + // + // Please refer to detailed `XHeader` description. 
+ // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): \ + // object has been successfully marked to be removed from the container; + // - Common failures (SECTION_FAILURE_COMMON); + // - **ACCESS_DENIED** (2048, SECTION_OBJECT): \ + // delete access to the object is denied; + // - **LOCKED** (2050, SECTION_OBJECT): \ + // deleting a locked object is prohibited; + // - **CONTAINER_NOT_FOUND** (3072, SECTION_CONTAINER): \ + // object container not found; + // - **TOKEN_EXPIRED** (4097, SECTION_SESSION): \ + // provided session token has expired. + Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*DeleteResponse, error) + // Returns the object Headers without data payload. By default full header is + // returned. If `main_only` request field is set, the short header with only + // the very minimal information will be returned instead. + // + // Extended headers can change `Head` behaviour: + // - __NEOFS__NETMAP_EPOCH \ + // Will use the requsted version of Network Map for object placement + // calculation. + // + // Please refer to detailed `XHeader` description. + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): \ + // object header has been successfully read; + // - Common failures (SECTION_FAILURE_COMMON); + // - **ACCESS_DENIED** (2048, SECTION_OBJECT): \ + // access to operation HEAD of the object is denied; + // - **OBJECT_NOT_FOUND** (2049, SECTION_OBJECT): \ + // object not found in container; + // - **OBJECT_ALREADY_REMOVED** (2052, SECTION_OBJECT): \ + // the requested object has been marked as deleted; + // - **CONTAINER_NOT_FOUND** (3072, SECTION_CONTAINER): \ + // object container not found; + // - **TOKEN_EXPIRED** (4097, SECTION_SESSION): \ + // provided session token has expired. + Head(ctx context.Context, in *HeadRequest, opts ...grpc.CallOption) (*HeadResponse, error) + // Search objects in container. Search query allows to match by Object + // Header's filed values. 
Please see the corresponding NeoFS Technical + // Specification section for more details. + // + // Extended headers can change `Search` behaviour: + // - __NEOFS__NETMAP_EPOCH \ + // Will use the requsted version of Network Map for object placement + // calculation. + // + // Please refer to detailed `XHeader` description. + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): \ + // objects have been successfully selected; + // - Common failures (SECTION_FAILURE_COMMON); + // - **ACCESS_DENIED** (2048, SECTION_OBJECT): \ + // access to operation SEARCH of the object is denied; + // - **CONTAINER_NOT_FOUND** (3072, SECTION_CONTAINER): \ + // search container not found; + // - **TOKEN_EXPIRED** (4097, SECTION_SESSION): \ + // provided session token has expired. + Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (ObjectService_SearchClient, error) + // Get byte range of data payload. Range is set as an (offset, length) tuple. + // Like in `Get` method, the response uses gRPC stream. Requested range can be + // restored by concatenation of all received payload chunks keeping the receiving + // order. + // + // Extended headers can change `GetRange` behaviour: + // - __NEOFS__NETMAP_EPOCH \ + // Will use the requsted version of Network Map for object placement + // calculation. DEPRECATED: header ignored by servers. + // - __NEOFS__NETMAP_LOOKUP_DEPTH \ + // Will try older versions of Network Map to find an object until the depth + // limit is reached. DEPRECATED: header ignored by servers. + // + // Please refer to detailed `XHeader` description. 
+ // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): \ + // data range of the object payload has been successfully read; + // - Common failures (SECTION_FAILURE_COMMON); + // - **ACCESS_DENIED** (2048, SECTION_OBJECT): \ + // access to operation RANGE of the object is denied; + // - **OBJECT_NOT_FOUND** (2049, SECTION_OBJECT): \ + // object not found in container; + // - **OBJECT_ALREADY_REMOVED** (2052, SECTION_OBJECT): \ + // the requested object has been marked as deleted. + // - **OUT_OF_RANGE** (2053, SECTION_OBJECT): \ + // the requested range is out of bounds; + // - **CONTAINER_NOT_FOUND** (3072, SECTION_CONTAINER): \ + // object container not found; + // - **TOKEN_EXPIRED** (4097, SECTION_SESSION): \ + // provided session token has expired. + GetRange(ctx context.Context, in *GetRangeRequest, opts ...grpc.CallOption) (ObjectService_GetRangeClient, error) + // Returns homomorphic or regular hash of object's payload range after + // applying XOR operation with the provided `salt`. Ranges are set of (offset, + // length) tuples. Hashes order in response corresponds to the ranges order in + // the request. Note that hash is calculated for XORed data. + // + // Extended headers can change `GetRangeHash` behaviour: + // - __NEOFS__NETMAP_EPOCH \ + // Will use the requsted version of Network Map for object placement + // calculation. DEPRECATED: header ignored by servers. + // - __NEOFS__NETMAP_LOOKUP_DEPTH \ + // Will try older versions of Network Map to find an object until the depth + // limit is reached. DEPRECATED: header ignored by servers. + // + // Please refer to detailed `XHeader` description. 
+ // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): \ + // data range of the object payload has been successfully hashed; + // - Common failures (SECTION_FAILURE_COMMON); + // - **ACCESS_DENIED** (2048, SECTION_OBJECT): \ + // access to operation RANGEHASH of the object is denied; + // - **OBJECT_NOT_FOUND** (2049, SECTION_OBJECT): \ + // object not found in container; + // - **OUT_OF_RANGE** (2053, SECTION_OBJECT): \ + // the requested range is out of bounds; + // - **CONTAINER_NOT_FOUND** (3072, SECTION_CONTAINER): \ + // object container not found; + // - **TOKEN_EXPIRED** (4097, SECTION_SESSION): \ + // provided session token has expired. + GetRangeHash(ctx context.Context, in *GetRangeHashRequest, opts ...grpc.CallOption) (*GetRangeHashResponse, error) + // Save replica of the object on the NeoFS storage node. Both client and + // server must authenticate NeoFS storage nodes matching storage policy of + // the container referenced by the replicated object. Thus, this operation is + // purely system: regular users should not pay attention to it but use Put. + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): \ + // the object has been successfully replicated; + // - **INTERNAL_SERVER_ERROR** (1024, SECTION_FAILURE_COMMON): \ + // internal server error described in the text message; + // - **ACCESS_DENIED** (2048, SECTION_OBJECT): \ + // the client does not authenticate any NeoFS storage node matching storage + // policy of the container referenced by the replicated object + // - **CONTAINER_NOT_FOUND** (3072, SECTION_CONTAINER): \ + // the container to which the replicated object is associated was not found. 
+ Replicate(ctx context.Context, in *ReplicateRequest, opts ...grpc.CallOption) (*ReplicateResponse, error) +} + +type objectServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewObjectServiceClient(cc grpc.ClientConnInterface) ObjectServiceClient { + return &objectServiceClient{cc} +} + +func (c *objectServiceClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (ObjectService_GetClient, error) { + stream, err := c.cc.NewStream(ctx, &ObjectService_ServiceDesc.Streams[0], ObjectService_Get_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &objectServiceGetClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type ObjectService_GetClient interface { + Recv() (*GetResponse, error) + grpc.ClientStream +} + +type objectServiceGetClient struct { + grpc.ClientStream +} + +func (x *objectServiceGetClient) Recv() (*GetResponse, error) { + m := new(GetResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *objectServiceClient) Put(ctx context.Context, opts ...grpc.CallOption) (ObjectService_PutClient, error) { + stream, err := c.cc.NewStream(ctx, &ObjectService_ServiceDesc.Streams[1], ObjectService_Put_FullMethodName, opts...) 
+ if err != nil { + return nil, err + } + x := &objectServicePutClient{stream} + return x, nil +} + +type ObjectService_PutClient interface { + Send(*PutRequest) error + CloseAndRecv() (*PutResponse, error) + grpc.ClientStream +} + +type objectServicePutClient struct { + grpc.ClientStream +} + +func (x *objectServicePutClient) Send(m *PutRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *objectServicePutClient) CloseAndRecv() (*PutResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(PutResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *objectServiceClient) Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*DeleteResponse, error) { + out := new(DeleteResponse) + err := c.cc.Invoke(ctx, ObjectService_Delete_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *objectServiceClient) Head(ctx context.Context, in *HeadRequest, opts ...grpc.CallOption) (*HeadResponse, error) { + out := new(HeadResponse) + err := c.cc.Invoke(ctx, ObjectService_Head_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *objectServiceClient) Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (ObjectService_SearchClient, error) { + stream, err := c.cc.NewStream(ctx, &ObjectService_ServiceDesc.Streams[2], ObjectService_Search_FullMethodName, opts...) 
+ if err != nil { + return nil, err + } + x := &objectServiceSearchClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type ObjectService_SearchClient interface { + Recv() (*SearchResponse, error) + grpc.ClientStream +} + +type objectServiceSearchClient struct { + grpc.ClientStream +} + +func (x *objectServiceSearchClient) Recv() (*SearchResponse, error) { + m := new(SearchResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *objectServiceClient) GetRange(ctx context.Context, in *GetRangeRequest, opts ...grpc.CallOption) (ObjectService_GetRangeClient, error) { + stream, err := c.cc.NewStream(ctx, &ObjectService_ServiceDesc.Streams[3], ObjectService_GetRange_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &objectServiceGetRangeClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type ObjectService_GetRangeClient interface { + Recv() (*GetRangeResponse, error) + grpc.ClientStream +} + +type objectServiceGetRangeClient struct { + grpc.ClientStream +} + +func (x *objectServiceGetRangeClient) Recv() (*GetRangeResponse, error) { + m := new(GetRangeResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *objectServiceClient) GetRangeHash(ctx context.Context, in *GetRangeHashRequest, opts ...grpc.CallOption) (*GetRangeHashResponse, error) { + out := new(GetRangeHashResponse) + err := c.cc.Invoke(ctx, ObjectService_GetRangeHash_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *objectServiceClient) Replicate(ctx context.Context, in *ReplicateRequest, opts ...grpc.CallOption) (*ReplicateResponse, error) { + out := new(ReplicateResponse) + err := c.cc.Invoke(ctx, ObjectService_Replicate_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ObjectServiceServer is the server API for ObjectService service. +// All implementations should embed UnimplementedObjectServiceServer +// for forward compatibility +type ObjectServiceServer interface { + // Receive full object structure, including Headers and payload. Response uses + // gRPC stream. First response message carries the object with the requested address. + // Chunk messages are parts of the object's payload if it is needed. All + // messages, except the first one, carry payload chunks. The requested object can + // be restored by concatenation of object message payload and all chunks + // keeping the receiving order. + // + // Extended headers can change `Get` behaviour: + // - __NEOFS__NETMAP_EPOCH \ + // Will use the requsted version of Network Map for object placement + // calculation. DEPRECATED: header ignored by servers. + // - __NEOFS__NETMAP_LOOKUP_DEPTH \ + // Will try older versions (starting from `__NEOFS__NETMAP_EPOCH` if specified or + // the latest one otherwise) of Network Map to find an object until the depth + // limit is reached. DEPRECATED: header ignored by servers. + // + // Please refer to detailed `XHeader` description. 
+ // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): \ + // object has been successfully read; + // - Common failures (SECTION_FAILURE_COMMON); + // - **ACCESS_DENIED** (2048, SECTION_OBJECT): \ + // read access to the object is denied; + // - **OBJECT_NOT_FOUND** (2049, SECTION_OBJECT): \ + // object not found in container; + // - **OBJECT_ALREADY_REMOVED** (2052, SECTION_OBJECT): \ + // the requested object has been marked as deleted; + // - **CONTAINER_NOT_FOUND** (3072, SECTION_CONTAINER): \ + // object container not found; + // - **TOKEN_EXPIRED** (4097, SECTION_SESSION): \ + // provided session token has expired. + Get(*GetRequest, ObjectService_GetServer) error + // Put the object into container. Request uses gRPC stream. First message + // SHOULD be of PutHeader type. `ContainerID` and `OwnerID` of an object + // SHOULD be set. Session token SHOULD be obtained before `PUT` operation (see + // session package). Chunk messages are considered by server as a part of an + // object payload. All messages, except first one, SHOULD be payload chunks. + // Chunk messages SHOULD be sent in the direct order of fragmentation. + // + // Extended headers can change `Put` behaviour: + // - __NEOFS__NETMAP_EPOCH \ + // Will use the requsted version of Network Map for object placement + // calculation. DEPRECATED: header ignored by servers. + // + // Please refer to detailed `XHeader` description. 
+ // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): \ + // object has been successfully saved in the container; + // - Common failures (SECTION_FAILURE_COMMON); + // - **ACCESS_DENIED** (2048, SECTION_OBJECT): \ + // write access to the container is denied; + // - **LOCKED** (2050, SECTION_OBJECT): \ + // placement of an object of type TOMBSTONE that includes at least one locked + // object is prohibited; + // - **LOCK_NON_REGULAR_OBJECT** (2051, SECTION_OBJECT): \ + // placement of an object of type LOCK that includes at least one object of + // type other than REGULAR is prohibited; + // - **CONTAINER_NOT_FOUND** (3072, SECTION_CONTAINER): \ + // object storage container not found; + // - **TOKEN_NOT_FOUND** (4096, SECTION_SESSION): \ + // (for trusted object preparation) session private key does not exist or has + // + // been deleted; + // - **TOKEN_EXPIRED** (4097, SECTION_SESSION): \ + // provided session token has expired. + Put(ObjectService_PutServer) error + // Delete the object from a container. There is no immediate removal + // guarantee. Object will be marked for removal and deleted eventually. + // + // Extended headers can change `Delete` behaviour: + // - __NEOFS__NETMAP_EPOCH \ + // Will use the requsted version of Network Map for object placement + // calculation. DEPRECATED: header ignored by servers. + // + // Please refer to detailed `XHeader` description. + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): \ + // object has been successfully marked to be removed from the container; + // - Common failures (SECTION_FAILURE_COMMON); + // - **ACCESS_DENIED** (2048, SECTION_OBJECT): \ + // delete access to the object is denied; + // - **LOCKED** (2050, SECTION_OBJECT): \ + // deleting a locked object is prohibited; + // - **CONTAINER_NOT_FOUND** (3072, SECTION_CONTAINER): \ + // object container not found; + // - **TOKEN_EXPIRED** (4097, SECTION_SESSION): \ + // provided session token has expired. 
+ Delete(context.Context, *DeleteRequest) (*DeleteResponse, error) + // Returns the object Headers without data payload. By default full header is + // returned. If `main_only` request field is set, the short header with only + // the very minimal information will be returned instead. + // + // Extended headers can change `Head` behaviour: + // - __NEOFS__NETMAP_EPOCH \ + // Will use the requsted version of Network Map for object placement + // calculation. + // + // Please refer to detailed `XHeader` description. + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): \ + // object header has been successfully read; + // - Common failures (SECTION_FAILURE_COMMON); + // - **ACCESS_DENIED** (2048, SECTION_OBJECT): \ + // access to operation HEAD of the object is denied; + // - **OBJECT_NOT_FOUND** (2049, SECTION_OBJECT): \ + // object not found in container; + // - **OBJECT_ALREADY_REMOVED** (2052, SECTION_OBJECT): \ + // the requested object has been marked as deleted; + // - **CONTAINER_NOT_FOUND** (3072, SECTION_CONTAINER): \ + // object container not found; + // - **TOKEN_EXPIRED** (4097, SECTION_SESSION): \ + // provided session token has expired. + Head(context.Context, *HeadRequest) (*HeadResponse, error) + // Search objects in container. Search query allows to match by Object + // Header's filed values. Please see the corresponding NeoFS Technical + // Specification section for more details. + // + // Extended headers can change `Search` behaviour: + // - __NEOFS__NETMAP_EPOCH \ + // Will use the requsted version of Network Map for object placement + // calculation. + // + // Please refer to detailed `XHeader` description. 
+ // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): \ + // objects have been successfully selected; + // - Common failures (SECTION_FAILURE_COMMON); + // - **ACCESS_DENIED** (2048, SECTION_OBJECT): \ + // access to operation SEARCH of the object is denied; + // - **CONTAINER_NOT_FOUND** (3072, SECTION_CONTAINER): \ + // search container not found; + // - **TOKEN_EXPIRED** (4097, SECTION_SESSION): \ + // provided session token has expired. + Search(*SearchRequest, ObjectService_SearchServer) error + // Get byte range of data payload. Range is set as an (offset, length) tuple. + // Like in `Get` method, the response uses gRPC stream. Requested range can be + // restored by concatenation of all received payload chunks keeping the receiving + // order. + // + // Extended headers can change `GetRange` behaviour: + // - __NEOFS__NETMAP_EPOCH \ + // Will use the requsted version of Network Map for object placement + // calculation. DEPRECATED: header ignored by servers. + // - __NEOFS__NETMAP_LOOKUP_DEPTH \ + // Will try older versions of Network Map to find an object until the depth + // limit is reached. DEPRECATED: header ignored by servers. + // + // Please refer to detailed `XHeader` description. + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): \ + // data range of the object payload has been successfully read; + // - Common failures (SECTION_FAILURE_COMMON); + // - **ACCESS_DENIED** (2048, SECTION_OBJECT): \ + // access to operation RANGE of the object is denied; + // - **OBJECT_NOT_FOUND** (2049, SECTION_OBJECT): \ + // object not found in container; + // - **OBJECT_ALREADY_REMOVED** (2052, SECTION_OBJECT): \ + // the requested object has been marked as deleted. + // - **OUT_OF_RANGE** (2053, SECTION_OBJECT): \ + // the requested range is out of bounds; + // - **CONTAINER_NOT_FOUND** (3072, SECTION_CONTAINER): \ + // object container not found; + // - **TOKEN_EXPIRED** (4097, SECTION_SESSION): \ + // provided session token has expired. 
+ GetRange(*GetRangeRequest, ObjectService_GetRangeServer) error + // Returns homomorphic or regular hash of object's payload range after + // applying XOR operation with the provided `salt`. Ranges are set of (offset, + // length) tuples. Hashes order in response corresponds to the ranges order in + // the request. Note that hash is calculated for XORed data. + // + // Extended headers can change `GetRangeHash` behaviour: + // - __NEOFS__NETMAP_EPOCH \ + // Will use the requsted version of Network Map for object placement + // calculation. DEPRECATED: header ignored by servers. + // - __NEOFS__NETMAP_LOOKUP_DEPTH \ + // Will try older versions of Network Map to find an object until the depth + // limit is reached. DEPRECATED: header ignored by servers. + // + // Please refer to detailed `XHeader` description. + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): \ + // data range of the object payload has been successfully hashed; + // - Common failures (SECTION_FAILURE_COMMON); + // - **ACCESS_DENIED** (2048, SECTION_OBJECT): \ + // access to operation RANGEHASH of the object is denied; + // - **OBJECT_NOT_FOUND** (2049, SECTION_OBJECT): \ + // object not found in container; + // - **OUT_OF_RANGE** (2053, SECTION_OBJECT): \ + // the requested range is out of bounds; + // - **CONTAINER_NOT_FOUND** (3072, SECTION_CONTAINER): \ + // object container not found; + // - **TOKEN_EXPIRED** (4097, SECTION_SESSION): \ + // provided session token has expired. + GetRangeHash(context.Context, *GetRangeHashRequest) (*GetRangeHashResponse, error) + // Save replica of the object on the NeoFS storage node. Both client and + // server must authenticate NeoFS storage nodes matching storage policy of + // the container referenced by the replicated object. Thus, this operation is + // purely system: regular users should not pay attention to it but use Put. 
+ // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): \ + // the object has been successfully replicated; + // - **INTERNAL_SERVER_ERROR** (1024, SECTION_FAILURE_COMMON): \ + // internal server error described in the text message; + // - **ACCESS_DENIED** (2048, SECTION_OBJECT): \ + // the client does not authenticate any NeoFS storage node matching storage + // policy of the container referenced by the replicated object + // - **CONTAINER_NOT_FOUND** (3072, SECTION_CONTAINER): \ + // the container to which the replicated object is associated was not found. + Replicate(context.Context, *ReplicateRequest) (*ReplicateResponse, error) +} + +// UnimplementedObjectServiceServer should be embedded to have forward compatible implementations. +type UnimplementedObjectServiceServer struct { +} + +func (UnimplementedObjectServiceServer) Get(*GetRequest, ObjectService_GetServer) error { + return status.Errorf(codes.Unimplemented, "method Get not implemented") +} +func (UnimplementedObjectServiceServer) Put(ObjectService_PutServer) error { + return status.Errorf(codes.Unimplemented, "method Put not implemented") +} +func (UnimplementedObjectServiceServer) Delete(context.Context, *DeleteRequest) (*DeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +} +func (UnimplementedObjectServiceServer) Head(context.Context, *HeadRequest) (*HeadResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Head not implemented") +} +func (UnimplementedObjectServiceServer) Search(*SearchRequest, ObjectService_SearchServer) error { + return status.Errorf(codes.Unimplemented, "method Search not implemented") +} +func (UnimplementedObjectServiceServer) GetRange(*GetRangeRequest, ObjectService_GetRangeServer) error { + return status.Errorf(codes.Unimplemented, "method GetRange not implemented") +} +func (UnimplementedObjectServiceServer) GetRangeHash(context.Context, *GetRangeHashRequest) (*GetRangeHashResponse, error) { + 
return nil, status.Errorf(codes.Unimplemented, "method GetRangeHash not implemented") +} +func (UnimplementedObjectServiceServer) Replicate(context.Context, *ReplicateRequest) (*ReplicateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Replicate not implemented") +} + +// UnsafeObjectServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ObjectServiceServer will +// result in compilation errors. +type UnsafeObjectServiceServer interface { + mustEmbedUnimplementedObjectServiceServer() +} + +func RegisterObjectServiceServer(s grpc.ServiceRegistrar, srv ObjectServiceServer) { + s.RegisterService(&ObjectService_ServiceDesc, srv) +} + +func _ObjectService_Get_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(GetRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ObjectServiceServer).Get(m, &objectServiceGetServer{stream}) +} + +type ObjectService_GetServer interface { + Send(*GetResponse) error + grpc.ServerStream +} + +type objectServiceGetServer struct { + grpc.ServerStream +} + +func (x *objectServiceGetServer) Send(m *GetResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _ObjectService_Put_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ObjectServiceServer).Put(&objectServicePutServer{stream}) +} + +type ObjectService_PutServer interface { + SendAndClose(*PutResponse) error + Recv() (*PutRequest, error) + grpc.ServerStream +} + +type objectServicePutServer struct { + grpc.ServerStream +} + +func (x *objectServicePutServer) SendAndClose(m *PutResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *objectServicePutServer) Recv() (*PutRequest, error) { + m := new(PutRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _ObjectService_Delete_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ObjectServiceServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ObjectService_Delete_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ObjectServiceServer).Delete(ctx, req.(*DeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ObjectService_Head_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HeadRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ObjectServiceServer).Head(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ObjectService_Head_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ObjectServiceServer).Head(ctx, req.(*HeadRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ObjectService_Search_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SearchRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ObjectServiceServer).Search(m, &objectServiceSearchServer{stream}) +} + +type ObjectService_SearchServer interface { + Send(*SearchResponse) error + grpc.ServerStream +} + +type objectServiceSearchServer struct { + grpc.ServerStream +} + +func (x *objectServiceSearchServer) Send(m *SearchResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _ObjectService_GetRange_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(GetRangeRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ObjectServiceServer).GetRange(m, &objectServiceGetRangeServer{stream}) +} + +type 
ObjectService_GetRangeServer interface { + Send(*GetRangeResponse) error + grpc.ServerStream +} + +type objectServiceGetRangeServer struct { + grpc.ServerStream +} + +func (x *objectServiceGetRangeServer) Send(m *GetRangeResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _ObjectService_GetRangeHash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRangeHashRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ObjectServiceServer).GetRangeHash(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ObjectService_GetRangeHash_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ObjectServiceServer).GetRangeHash(ctx, req.(*GetRangeHashRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ObjectService_Replicate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReplicateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ObjectServiceServer).Replicate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ObjectService_Replicate_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ObjectServiceServer).Replicate(ctx, req.(*ReplicateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// ObjectService_ServiceDesc is the grpc.ServiceDesc for ObjectService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ObjectService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "neo.fs.v2.object.ObjectService", + HandlerType: (*ObjectServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Delete", + Handler: _ObjectService_Delete_Handler, + }, + { + MethodName: "Head", + Handler: _ObjectService_Head_Handler, + }, + { + MethodName: "GetRangeHash", + Handler: _ObjectService_GetRangeHash_Handler, + }, + { + MethodName: "Replicate", + Handler: _ObjectService_Replicate_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Get", + Handler: _ObjectService_Get_Handler, + ServerStreams: true, + }, + { + StreamName: "Put", + Handler: _ObjectService_Put_Handler, + ClientStreams: true, + }, + { + StreamName: "Search", + Handler: _ObjectService_Search_Handler, + ServerStreams: true, + }, + { + StreamName: "GetRange", + Handler: _ObjectService_GetRange_Handler, + ServerStreams: true, + }, + }, + Metadata: "object/grpc/service.proto", +} diff --git a/api/object/status.pb.go b/api/object/status.pb.go new file mode 100644 index 000000000..5094dd41d --- /dev/null +++ b/api/object/status.pb.go @@ -0,0 +1,251 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.18.0 +// source: v2/object/grpc/status.proto + +package object + +import ( + reflect "reflect" + sync "sync" + + "github.com/nspcc-dev/neofs-sdk-go/api/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type StatusCommon int32 + +const ( + StatusCommon_ACCESS_DENIED StatusCommon = 0 +) + +// Enum value maps for StatusCommon. +var ( + StatusCommon_name = map[int32]string{ + 0: "ACCESS_DENIED", + } + StatusCommon_value = map[string]int32{ + "ACCESS_DENIED": 0, + } +) + +func (x StatusCommon) Enum() *StatusCommon { + p := new(StatusCommon) + *p = x + return p +} + +func (x StatusCommon) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (StatusCommon) Descriptor() protoreflect.EnumDescriptor { + return file_v2_object_grpc_status_proto_enumTypes[0].Descriptor() +} + +func (StatusCommon) Type() protoreflect.EnumType { + return &file_v2_object_grpc_status_proto_enumTypes[0] +} + +func (x StatusCommon) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use StatusCommon.Descriptor instead. +func (StatusCommon) EnumDescriptor() ([]byte, []int) { + return file_v2_object_grpc_status_proto_rawDescGZIP(), []int{0} +} + +type StatusPut int32 + +const ( + StatusPut_STATUS_PUT_INCOMPLETE StatusPut = 0 +) + +// Enum value maps for StatusPut. +var ( + StatusPut_name = map[int32]string{ + 0: "STATUS_PUT_INCOMPLETE", + } + StatusPut_value = map[string]int32{ + "STATUS_PUT_INCOMPLETE": 0, + } +) + +func (x StatusPut) Enum() *StatusPut { + p := new(StatusPut) + *p = x + return p +} + +func (x StatusPut) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (StatusPut) Descriptor() protoreflect.EnumDescriptor { + return file_v2_object_grpc_status_proto_enumTypes[1].Descriptor() +} + +func (StatusPut) Type() protoreflect.EnumType { + return &file_v2_object_grpc_status_proto_enumTypes[1] +} + +func (x StatusPut) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use StatusPut.Descriptor instead. 
+func (StatusPut) EnumDescriptor() ([]byte, []int) { + return file_v2_object_grpc_status_proto_rawDescGZIP(), []int{1} +} + +type PutIncompleteDetail struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Statuses []*status.Status `protobuf:"bytes,1,rep,name=statuses,proto3" json:"statuses,omitempty"` +} + +func (x *PutIncompleteDetail) Reset() { + *x = PutIncompleteDetail{} + if protoimpl.UnsafeEnabled { + mi := &file_v2_object_grpc_status_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PutIncompleteDetail) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PutIncompleteDetail) ProtoMessage() {} + +func (x *PutIncompleteDetail) ProtoReflect() protoreflect.Message { + mi := &file_v2_object_grpc_status_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PutIncompleteDetail.ProtoReflect.Descriptor instead. 
+func (*PutIncompleteDetail) Descriptor() ([]byte, []int) { + return file_v2_object_grpc_status_proto_rawDescGZIP(), []int{0} +} + +func (x *PutIncompleteDetail) GetStatuses() []*status.Status { + if x != nil { + return x.Statuses + } + return nil +} + +var File_v2_object_grpc_status_proto protoreflect.FileDescriptor + +var file_v2_object_grpc_status_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x76, 0x32, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x67, 0x72, 0x70, 0x63, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x6e, + 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x1a, + 0x1a, 0x76, 0x32, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4b, 0x0a, 0x13, 0x50, + 0x75, 0x74, 0x49, 0x6e, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x12, 0x34, 0x0a, 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x08, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x2a, 0x21, 0x0a, 0x0c, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x11, 0x0a, 0x0d, 0x41, 0x43, 0x43, 0x45, + 0x53, 0x53, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x00, 0x2a, 0x26, 0x0a, 0x09, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x50, 0x75, 0x74, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x54, 0x41, 0x54, + 0x55, 0x53, 0x5f, 0x50, 0x55, 0x54, 0x5f, 0x49, 0x4e, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, + 0x45, 0x10, 0x00, 0x42, 0x56, 0x5a, 0x37, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x6e, 0x73, 0x70, 0x63, 0x63, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x6e, 0x65, 0x6f, 0x66, + 0x73, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x67, 0x6f, 
0x2f, 0x76, 0x32, 0x2f, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x3b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xaa, 0x02, + 0x1a, 0x4e, 0x65, 0x6f, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x41, 0x50, 0x49, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_v2_object_grpc_status_proto_rawDescOnce sync.Once + file_v2_object_grpc_status_proto_rawDescData = file_v2_object_grpc_status_proto_rawDesc +) + +func file_v2_object_grpc_status_proto_rawDescGZIP() []byte { + file_v2_object_grpc_status_proto_rawDescOnce.Do(func() { + file_v2_object_grpc_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_v2_object_grpc_status_proto_rawDescData) + }) + return file_v2_object_grpc_status_proto_rawDescData +} + +var file_v2_object_grpc_status_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_v2_object_grpc_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_v2_object_grpc_status_proto_goTypes = []interface{}{ + (StatusCommon)(0), // 0: neo.fs.v2.object.StatusCommon + (StatusPut)(0), // 1: neo.fs.v2.object.StatusPut + (*PutIncompleteDetail)(nil), // 2: neo.fs.v2.object.PutIncompleteDetail + (*status.Status)(nil), // 3: neo.fs.v2.status.Status +} +var file_v2_object_grpc_status_proto_depIdxs = []int32{ + 3, // 0: neo.fs.v2.object.PutIncompleteDetail.statuses:type_name -> neo.fs.v2.status.Status + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_v2_object_grpc_status_proto_init() } +func file_v2_object_grpc_status_proto_init() { + if File_v2_object_grpc_status_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_v2_object_grpc_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*PutIncompleteDetail); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_v2_object_grpc_status_proto_rawDesc, + NumEnums: 2, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_v2_object_grpc_status_proto_goTypes, + DependencyIndexes: file_v2_object_grpc_status_proto_depIdxs, + EnumInfos: file_v2_object_grpc_status_proto_enumTypes, + MessageInfos: file_v2_object_grpc_status_proto_msgTypes, + }.Build() + File_v2_object_grpc_status_proto = out.File + file_v2_object_grpc_status_proto_rawDesc = nil + file_v2_object_grpc_status_proto_goTypes = nil + file_v2_object_grpc_status_proto_depIdxs = nil +} diff --git a/api/object/types.pb.go b/api/object/types.pb.go new file mode 100644 index 000000000..983dc0bbe --- /dev/null +++ b/api/object/types.pb.go @@ -0,0 +1,1111 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.32.0 +// protoc v4.25.1 +// source: object/grpc/types.proto + +package object + +import ( + reflect "reflect" + sync "sync" + + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/session" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Type of the object payload content. Only `REGULAR` type objects can be split, +// hence `TOMBSTONE`, `STORAGE_GROUP` and `LOCK` payload is limited by the maximum +// object size. 
+// +// String presentation of object type is the same as definition: +// * REGULAR +// * TOMBSTONE +// * STORAGE_GROUP +// * LOCK +// * LINK +type ObjectType int32 + +const ( + // Just a normal object + ObjectType_REGULAR ObjectType = 0 + // Used internally to identify deleted objects + ObjectType_TOMBSTONE ObjectType = 1 + // StorageGroup information + ObjectType_STORAGE_GROUP ObjectType = 2 + // Object lock + ObjectType_LOCK ObjectType = 3 + // Object that stores child object IDs for the split objects. + ObjectType_LINK ObjectType = 4 +) + +// Enum value maps for ObjectType. +var ( + ObjectType_name = map[int32]string{ + 0: "REGULAR", + 1: "TOMBSTONE", + 2: "STORAGE_GROUP", + 3: "LOCK", + 4: "LINK", + } + ObjectType_value = map[string]int32{ + "REGULAR": 0, + "TOMBSTONE": 1, + "STORAGE_GROUP": 2, + "LOCK": 3, + "LINK": 4, + } +) + +func (x ObjectType) Enum() *ObjectType { + p := new(ObjectType) + *p = x + return p +} + +func (x ObjectType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ObjectType) Descriptor() protoreflect.EnumDescriptor { + return file_object_grpc_types_proto_enumTypes[0].Descriptor() +} + +func (ObjectType) Type() protoreflect.EnumType { + return &file_object_grpc_types_proto_enumTypes[0] +} + +func (x ObjectType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ObjectType.Descriptor instead. +func (ObjectType) EnumDescriptor() ([]byte, []int) { + return file_object_grpc_types_proto_rawDescGZIP(), []int{0} +} + +// Type of match expression +type MatchType int32 + +const ( + // Unknown. 
Not used + MatchType_MATCH_TYPE_UNSPECIFIED MatchType = 0 + // Full string match + MatchType_STRING_EQUAL MatchType = 1 + // Full string mismatch + MatchType_STRING_NOT_EQUAL MatchType = 2 + // Lack of key + MatchType_NOT_PRESENT MatchType = 3 + // String prefix match + MatchType_COMMON_PREFIX MatchType = 4 + // Numerical 'greater than' + MatchType_NUM_GT MatchType = 5 + // Numerical 'greater or equal than' + MatchType_NUM_GE MatchType = 6 + // Numerical 'less than' + MatchType_NUM_LT MatchType = 7 + // Numerical 'less or equal than' + MatchType_NUM_LE MatchType = 8 +) + +// Enum value maps for MatchType. +var ( + MatchType_name = map[int32]string{ + 0: "MATCH_TYPE_UNSPECIFIED", + 1: "STRING_EQUAL", + 2: "STRING_NOT_EQUAL", + 3: "NOT_PRESENT", + 4: "COMMON_PREFIX", + 5: "NUM_GT", + 6: "NUM_GE", + 7: "NUM_LT", + 8: "NUM_LE", + } + MatchType_value = map[string]int32{ + "MATCH_TYPE_UNSPECIFIED": 0, + "STRING_EQUAL": 1, + "STRING_NOT_EQUAL": 2, + "NOT_PRESENT": 3, + "COMMON_PREFIX": 4, + "NUM_GT": 5, + "NUM_GE": 6, + "NUM_LT": 7, + "NUM_LE": 8, + } +) + +func (x MatchType) Enum() *MatchType { + p := new(MatchType) + *p = x + return p +} + +func (x MatchType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (MatchType) Descriptor() protoreflect.EnumDescriptor { + return file_object_grpc_types_proto_enumTypes[1].Descriptor() +} + +func (MatchType) Type() protoreflect.EnumType { + return &file_object_grpc_types_proto_enumTypes[1] +} + +func (x MatchType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use MatchType.Descriptor instead. +func (MatchType) EnumDescriptor() ([]byte, []int) { + return file_object_grpc_types_proto_rawDescGZIP(), []int{1} +} + +// Short header fields +type ShortHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Object format version. 
Effectively, the version of API library used to + // create particular object. + Version *refs.Version `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Epoch when the object was created + CreationEpoch uint64 `protobuf:"varint,2,opt,name=creation_epoch,json=creationEpoch,proto3" json:"creation_epoch,omitempty"` + // Object's owner + OwnerId *refs.OwnerID `protobuf:"bytes,3,opt,name=owner_id,json=ownerID,proto3" json:"owner_id,omitempty"` + // Type of the object payload content + ObjectType ObjectType `protobuf:"varint,4,opt,name=object_type,json=objectType,proto3,enum=neo.fs.v2.object.ObjectType" json:"object_type,omitempty"` + // Size of payload in bytes. + // `0xFFFFFFFFFFFFFFFF` means `payload_length` is unknown + PayloadLength uint64 `protobuf:"varint,5,opt,name=payload_length,json=payloadLength,proto3" json:"payload_length,omitempty"` + // Hash of payload bytes + PayloadHash *refs.Checksum `protobuf:"bytes,6,opt,name=payload_hash,json=payloadHash,proto3" json:"payload_hash,omitempty"` + // Homomorphic hash of the object payload + HomomorphicHash *refs.Checksum `protobuf:"bytes,7,opt,name=homomorphic_hash,json=homomorphicHash,proto3" json:"homomorphic_hash,omitempty"` +} + +func (x *ShortHeader) Reset() { + *x = ShortHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_types_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ShortHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ShortHeader) ProtoMessage() {} + +func (x *ShortHeader) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_types_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ShortHeader.ProtoReflect.Descriptor instead. 
+func (*ShortHeader) Descriptor() ([]byte, []int) { + return file_object_grpc_types_proto_rawDescGZIP(), []int{0} +} + +func (x *ShortHeader) GetVersion() *refs.Version { + if x != nil { + return x.Version + } + return nil +} + +func (x *ShortHeader) GetCreationEpoch() uint64 { + if x != nil { + return x.CreationEpoch + } + return 0 +} + +func (x *ShortHeader) GetOwnerId() *refs.OwnerID { + if x != nil { + return x.OwnerId + } + return nil +} + +func (x *ShortHeader) GetObjectType() ObjectType { + if x != nil { + return x.ObjectType + } + return ObjectType_REGULAR +} + +func (x *ShortHeader) GetPayloadLength() uint64 { + if x != nil { + return x.PayloadLength + } + return 0 +} + +func (x *ShortHeader) GetPayloadHash() *refs.Checksum { + if x != nil { + return x.PayloadHash + } + return nil +} + +func (x *ShortHeader) GetHomomorphicHash() *refs.Checksum { + if x != nil { + return x.HomomorphicHash + } + return nil +} + +// Object Header +type Header struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Object format version. Effectively, the version of API library used to + // create particular object + Version *refs.Version `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Object's container + ContainerId *refs.ContainerID `protobuf:"bytes,2,opt,name=container_id,json=containerID,proto3" json:"container_id,omitempty"` + // Object's owner + OwnerId *refs.OwnerID `protobuf:"bytes,3,opt,name=owner_id,json=ownerID,proto3" json:"owner_id,omitempty"` + // Object creation Epoch + CreationEpoch uint64 `protobuf:"varint,4,opt,name=creation_epoch,json=creationEpoch,proto3" json:"creation_epoch,omitempty"` + // Size of payload in bytes. + // `0xFFFFFFFFFFFFFFFF` means `payload_length` is unknown. 
+ PayloadLength uint64 `protobuf:"varint,5,opt,name=payload_length,json=payloadLength,proto3" json:"payload_length,omitempty"` + // Hash of payload bytes + PayloadHash *refs.Checksum `protobuf:"bytes,6,opt,name=payload_hash,json=payloadHash,proto3" json:"payload_hash,omitempty"` + // Type of the object payload content + ObjectType ObjectType `protobuf:"varint,7,opt,name=object_type,json=objectType,proto3,enum=neo.fs.v2.object.ObjectType" json:"object_type,omitempty"` + // Homomorphic hash of the object payload + HomomorphicHash *refs.Checksum `protobuf:"bytes,8,opt,name=homomorphic_hash,json=homomorphicHash,proto3" json:"homomorphic_hash,omitempty"` + // Session token, if it was used during Object creation. Need it to verify + // integrity and authenticity out of Request scope. + SessionToken *session.SessionToken `protobuf:"bytes,9,opt,name=session_token,json=sessionToken,proto3" json:"session_token,omitempty"` + // User-defined object attributes. Attributes vary in length from object to + // object, so keep an eye on the entire Header limit depending on the context. 
+ Attributes []*Header_Attribute `protobuf:"bytes,10,rep,name=attributes,proto3" json:"attributes,omitempty"` + // Position of the object in the split hierarchy + Split *Header_Split `protobuf:"bytes,11,opt,name=split,proto3" json:"split,omitempty"` +} + +func (x *Header) Reset() { + *x = Header{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_types_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Header) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Header) ProtoMessage() {} + +func (x *Header) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_types_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Header.ProtoReflect.Descriptor instead. +func (*Header) Descriptor() ([]byte, []int) { + return file_object_grpc_types_proto_rawDescGZIP(), []int{1} +} + +func (x *Header) GetVersion() *refs.Version { + if x != nil { + return x.Version + } + return nil +} + +func (x *Header) GetContainerId() *refs.ContainerID { + if x != nil { + return x.ContainerId + } + return nil +} + +func (x *Header) GetOwnerId() *refs.OwnerID { + if x != nil { + return x.OwnerId + } + return nil +} + +func (x *Header) GetCreationEpoch() uint64 { + if x != nil { + return x.CreationEpoch + } + return 0 +} + +func (x *Header) GetPayloadLength() uint64 { + if x != nil { + return x.PayloadLength + } + return 0 +} + +func (x *Header) GetPayloadHash() *refs.Checksum { + if x != nil { + return x.PayloadHash + } + return nil +} + +func (x *Header) GetObjectType() ObjectType { + if x != nil { + return x.ObjectType + } + return ObjectType_REGULAR +} + +func (x *Header) GetHomomorphicHash() *refs.Checksum { + if x != nil { + return x.HomomorphicHash + } + return nil +} + +func (x *Header) 
GetSessionToken() *session.SessionToken { + if x != nil { + return x.SessionToken + } + return nil +} + +func (x *Header) GetAttributes() []*Header_Attribute { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *Header) GetSplit() *Header_Split { + if x != nil { + return x.Split + } + return nil +} + +// Object structure. Object is immutable and content-addressed. It means +// `ObjectID` will change if the header or the payload changes. It's calculated as a +// hash of header field which contains hash of the object's payload. +// +// For non-regular object types payload format depends on object type specified +// in the header. +type Object struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Object's unique identifier. + ObjectId *refs.ObjectID `protobuf:"bytes,1,opt,name=object_id,json=objectID,proto3" json:"object_id,omitempty"` + // Signed object_id + Signature *refs.Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` + // Object metadata headers + Header *Header `protobuf:"bytes,3,opt,name=header,proto3" json:"header,omitempty"` + // Payload bytes + Payload []byte `protobuf:"bytes,4,opt,name=payload,proto3" json:"payload,omitempty"` +} + +func (x *Object) Reset() { + *x = Object{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_types_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Object) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Object) ProtoMessage() {} + +func (x *Object) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_types_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Object.ProtoReflect.Descriptor instead. 
+func (*Object) Descriptor() ([]byte, []int) { + return file_object_grpc_types_proto_rawDescGZIP(), []int{2} +} + +func (x *Object) GetObjectId() *refs.ObjectID { + if x != nil { + return x.ObjectId + } + return nil +} + +func (x *Object) GetSignature() *refs.Signature { + if x != nil { + return x.Signature + } + return nil +} + +func (x *Object) GetHeader() *Header { + if x != nil { + return x.Header + } + return nil +} + +func (x *Object) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +// Meta information of split hierarchy for object assembly. With the last part +// one can traverse linked list of split hierarchy back to the first part and +// assemble the original object. With a linking object one can assemble an object +// right from the object parts. +type SplitInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // DEPRECATED. Was used as an identifier of a split chain. Use the first + // part ID instead. + // 16 byte UUID used to identify the split object hierarchy parts. + SplitId []byte `protobuf:"bytes,1,opt,name=split_id,json=splitId,proto3" json:"split_id,omitempty"` + // The identifier of the last object in split hierarchy parts. It contains + // split header with the original object header. + LastPart *refs.ObjectID `protobuf:"bytes,2,opt,name=last_part,json=lastPart,proto3" json:"last_part,omitempty"` + // The identifier of a linking object for split hierarchy parts. It contains + // split header with the original object header and a sorted list of + // object parts. + Link *refs.ObjectID `protobuf:"bytes,3,opt,name=link,proto3" json:"link,omitempty"` + // Identifier of the first part of the origin object. Known to all the split + // parts except the first one. Identifies the split and allows to differ them. 
+ FirstPart *refs.ObjectID `protobuf:"bytes,4,opt,name=first_part,json=firstPart,proto3" json:"first_part,omitempty"` +} + +func (x *SplitInfo) Reset() { + *x = SplitInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_types_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SplitInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SplitInfo) ProtoMessage() {} + +func (x *SplitInfo) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_types_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SplitInfo.ProtoReflect.Descriptor instead. +func (*SplitInfo) Descriptor() ([]byte, []int) { + return file_object_grpc_types_proto_rawDescGZIP(), []int{3} +} + +func (x *SplitInfo) GetSplitId() []byte { + if x != nil { + return x.SplitId + } + return nil +} + +func (x *SplitInfo) GetLastPart() *refs.ObjectID { + if x != nil { + return x.LastPart + } + return nil +} + +func (x *SplitInfo) GetLink() *refs.ObjectID { + if x != nil { + return x.Link + } + return nil +} + +func (x *SplitInfo) GetFirstPart() *refs.ObjectID { + if x != nil { + return x.FirstPart + } + return nil +} + +// `Attribute` is a user-defined Key-Value metadata pair attached to an +// object. +// +// Key name must be an object-unique valid UTF-8 string. Value can't be empty. +// Objects with duplicated attribute names or attributes with empty values +// will be considered invalid. 
+// +// There are some "well-known" attributes starting with `__NEOFS__` prefix +// that affect system behaviour: +// +// - __NEOFS__EXPIRATION_EPOCH \ +// Tells GC to delete object after that epoch +// - __NEOFS__TICK_EPOCH \ +// Decimal number that defines what epoch must produce +// object notification with UTF-8 object address in a +// body (`0` value produces notification right after +// object put). +// DEPRECATED: attribute ignored by servers. +// - __NEOFS__TICK_TOPIC \ +// UTF-8 string topic ID that is used for object notification. +// DEPRECATED: attribute ignored by servers. +// +// And some well-known attributes used by applications only: +// +// - Name \ +// Human-friendly name +// - FileName \ +// File name to be associated with the object on saving +// - FilePath \ +// Full path to be associated with the object on saving. Should start with a +// '/' and use '/' as a delimiting symbol. Trailing '/' should be +// interpreted as a virtual directory marker. If an object has conflicting +// FilePath and FileName, FilePath should have higher priority, because it +// is used to construct the directory tree. FilePath with trailing '/' and +// non-empty FileName attribute should not be used together. +// - Timestamp \ +// User-defined local time of object creation in Unix Timestamp format +// - Content-Type \ +// MIME Content Type of object's payload +// +// For detailed description of each well-known attribute please see the +// corresponding section in NeoFS Technical Specification. 
+type Header_Attribute struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // string key to the object attribute + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // string value of the object attribute + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *Header_Attribute) Reset() { + *x = Header_Attribute{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_types_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Header_Attribute) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Header_Attribute) ProtoMessage() {} + +func (x *Header_Attribute) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_types_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Header_Attribute.ProtoReflect.Descriptor instead. +func (*Header_Attribute) Descriptor() ([]byte, []int) { + return file_object_grpc_types_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *Header_Attribute) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *Header_Attribute) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +// Bigger objects can be split into a chain of smaller objects. Information +// about inter-dependencies between spawned objects and how to re-construct +// the original one is in the `Split` headers. Parent and children objects +// must be within the same container. +type Header_Split struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifier of the origin object. Known only to the minor child. 
+ Parent *refs.ObjectID `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Identifier of the left split neighbor + Previous *refs.ObjectID `protobuf:"bytes,2,opt,name=previous,proto3" json:"previous,omitempty"` + // `signature` field of the parent object. Used to reconstruct parent. + ParentSignature *refs.Signature `protobuf:"bytes,3,opt,name=parent_signature,json=parentSignature,proto3" json:"parent_signature,omitempty"` + // `header` field of the parent object. Used to reconstruct parent. + ParentHeader *Header `protobuf:"bytes,4,opt,name=parent_header,json=parentHeader,proto3" json:"parent_header,omitempty"` + // DEPRECATED. Was used before creating the separate LINK object type. Keep + // child objects list in the LINK object's payload. + // List of identifiers of the objects generated by splitting current one. + Children []*refs.ObjectID `protobuf:"bytes,5,rep,name=children,proto3" json:"children,omitempty"` + // DEPRECATED. Was used as an identifier of a split chain. Use the first + // part ID instead. + // 16 byte UUIDv4 used to identify the split object hierarchy parts. Must be + // unique inside container. All objects participating in the split must have + // the same `split_id` value. + SplitId []byte `protobuf:"bytes,6,opt,name=split_id,json=splitID,proto3" json:"split_id,omitempty"` + // Identifier of the first part of the origin object. Known to all the split + // parts except the first one. Identifies the split and allows to differ them. 
+ First *refs.ObjectID `protobuf:"bytes,7,opt,name=first,proto3" json:"first,omitempty"` +} + +func (x *Header_Split) Reset() { + *x = Header_Split{} + if protoimpl.UnsafeEnabled { + mi := &file_object_grpc_types_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Header_Split) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Header_Split) ProtoMessage() {} + +func (x *Header_Split) ProtoReflect() protoreflect.Message { + mi := &file_object_grpc_types_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Header_Split.ProtoReflect.Descriptor instead. +func (*Header_Split) Descriptor() ([]byte, []int) { + return file_object_grpc_types_proto_rawDescGZIP(), []int{1, 1} +} + +func (x *Header_Split) GetParent() *refs.ObjectID { + if x != nil { + return x.Parent + } + return nil +} + +func (x *Header_Split) GetPrevious() *refs.ObjectID { + if x != nil { + return x.Previous + } + return nil +} + +func (x *Header_Split) GetParentSignature() *refs.Signature { + if x != nil { + return x.ParentSignature + } + return nil +} + +func (x *Header_Split) GetParentHeader() *Header { + if x != nil { + return x.ParentHeader + } + return nil +} + +func (x *Header_Split) GetChildren() []*refs.ObjectID { + if x != nil { + return x.Children + } + return nil +} + +func (x *Header_Split) GetSplitId() []byte { + if x != nil { + return x.SplitId + } + return nil +} + +func (x *Header_Split) GetFirst() *refs.ObjectID { + if x != nil { + return x.First + } + return nil +} + +var File_object_grpc_types_proto protoreflect.FileDescriptor + +var file_object_grpc_types_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x6e, 0x65, 0x6f, 0x2e, 0x66, + 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x1a, 0x15, 0x72, 0x65, 0x66, + 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x18, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x83, 0x03, 0x0a, + 0x0b, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x31, 0x0a, 0x07, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x25, 0x0a, 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x70, 0x6f, 0x63, + 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x32, 0x0a, 0x08, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, + 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, + 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x49, + 0x44, 0x52, 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, 0x3d, 0x0a, 0x0b, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x1c, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x61, 0x79, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x0d, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x4c, 
0x65, 0x6e, 0x67, 0x74, 0x68, + 0x12, 0x3b, 0x0a, 0x0c, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, + 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, + 0x52, 0x0b, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x43, 0x0a, + 0x10, 0x68, 0x6f, 0x6d, 0x6f, 0x6d, 0x6f, 0x72, 0x70, 0x68, 0x69, 0x63, 0x5f, 0x68, 0x61, 0x73, + 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, + 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, + 0x6d, 0x52, 0x0f, 0x68, 0x6f, 0x6d, 0x6f, 0x6d, 0x6f, 0x72, 0x70, 0x68, 0x69, 0x63, 0x48, 0x61, + 0x73, 0x68, 0x22, 0xab, 0x08, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x31, 0x0a, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x3e, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, + 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x49, 0x44, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, + 0x12, 0x32, 0x0a, 0x08, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, + 0x65, 0x66, 0x73, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x52, 0x07, 0x6f, 0x77, 0x6e, + 0x65, 0x72, 0x49, 0x44, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 
0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x63, 0x72, + 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x25, 0x0a, 0x0e, 0x70, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x0d, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x4c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x12, 0x3b, 0x0a, 0x0c, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x68, 0x61, + 0x73, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, + 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, + 0x75, 0x6d, 0x52, 0x0b, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, + 0x3d, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x43, + 0x0a, 0x10, 0x68, 0x6f, 0x6d, 0x6f, 0x6d, 0x6f, 0x72, 0x70, 0x68, 0x69, 0x63, 0x5f, 0x68, 0x61, + 0x73, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, + 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, + 0x75, 0x6d, 0x52, 0x0f, 0x68, 0x6f, 0x6d, 0x6f, 0x6d, 0x6f, 0x72, 0x70, 0x68, 0x69, 0x63, 0x48, + 0x61, 0x73, 0x68, 0x12, 0x44, 0x0a, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6e, 0x65, 0x6f, + 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x53, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x0c, 0x73, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 
0x65, 0x6e, 0x12, 0x42, 0x0a, 0x0a, 0x61, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x34, 0x0a, + 0x05, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6e, + 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x53, 0x70, 0x6c, 0x69, 0x74, 0x52, 0x05, 0x73, 0x70, + 0x6c, 0x69, 0x74, 0x1a, 0x33, 0x0a, 0x09, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0xf5, 0x02, 0x0a, 0x05, 0x53, 0x70, 0x6c, + 0x69, 0x74, 0x12, 0x30, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, + 0x65, 0x66, 0x73, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x44, 0x52, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, + 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x44, + 0x52, 0x08, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x12, 0x44, 0x0a, 0x10, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 
0x2e, 0x76, 0x32, + 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, + 0x0f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x12, 0x3d, 0x0a, 0x0d, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, + 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, + 0x34, 0x0a, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, + 0x66, 0x73, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x44, 0x52, 0x08, 0x63, 0x68, 0x69, + 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x49, 0x44, + 0x12, 0x2e, 0x0a, 0x05, 0x66, 0x69, 0x72, 0x73, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, + 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x44, 0x52, 0x05, 0x66, 0x69, 0x72, 0x73, 0x74, + 0x22, 0xc4, 0x01, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x35, 0x0a, 0x09, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, + 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x44, 0x52, 0x08, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x49, 0x44, 0x12, 0x37, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x72, 
0x65, 0x66, 0x73, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x30, 0x0a, 0x06, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, + 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x18, 0x0a, + 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, + 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0xc4, 0x01, 0x0a, 0x09, 0x53, 0x70, 0x6c, 0x69, + 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x49, 0x64, + 0x12, 0x35, 0x0a, 0x09, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, + 0x72, 0x65, 0x66, 0x73, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x44, 0x52, 0x08, 0x6c, + 0x61, 0x73, 0x74, 0x50, 0x61, 0x72, 0x74, 0x12, 0x2c, 0x0a, 0x04, 0x6c, 0x69, 0x6e, 0x6b, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x44, 0x52, + 0x04, 0x6c, 0x69, 0x6e, 0x6b, 0x12, 0x37, 0x0a, 0x0a, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x70, + 0x61, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, + 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x49, 0x44, 0x52, 0x09, 0x66, 0x69, 0x72, 0x73, 0x74, 0x50, 0x61, 0x72, 0x74, 0x2a, 0x4f, + 0x0a, 0x0a, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, + 0x52, 0x45, 0x47, 0x55, 0x4c, 0x41, 0x52, 0x10, 0x00, 
0x12, 0x0d, 0x0a, 0x09, 0x54, 0x4f, 0x4d, + 0x42, 0x53, 0x54, 0x4f, 0x4e, 0x45, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x54, 0x4f, 0x52, + 0x41, 0x47, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4c, + 0x4f, 0x43, 0x4b, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x49, 0x4e, 0x4b, 0x10, 0x04, 0x2a, + 0xa3, 0x01, 0x0a, 0x09, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, + 0x16, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, + 0x49, 0x4e, 0x47, 0x5f, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, + 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x10, + 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x4f, 0x54, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x54, + 0x10, 0x03, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4f, 0x4d, 0x4d, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x45, + 0x46, 0x49, 0x58, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x55, 0x4d, 0x5f, 0x47, 0x54, 0x10, + 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x55, 0x4d, 0x5f, 0x47, 0x45, 0x10, 0x06, 0x12, 0x0a, 0x0a, + 0x06, 0x4e, 0x55, 0x4d, 0x5f, 0x4c, 0x54, 0x10, 0x07, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x55, 0x4d, + 0x5f, 0x4c, 0x45, 0x10, 0x08, 0x42, 0x56, 0x5a, 0x37, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x6e, 0x73, 0x70, 0x63, 0x63, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x6e, 0x65, + 0x6f, 0x66, 0x73, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x3b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0xaa, 0x02, 0x1a, 0x4e, 0x65, 0x6f, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x41, 0x50, 0x49, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_object_grpc_types_proto_rawDescOnce 
sync.Once + file_object_grpc_types_proto_rawDescData = file_object_grpc_types_proto_rawDesc +) + +func file_object_grpc_types_proto_rawDescGZIP() []byte { + file_object_grpc_types_proto_rawDescOnce.Do(func() { + file_object_grpc_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_object_grpc_types_proto_rawDescData) + }) + return file_object_grpc_types_proto_rawDescData +} + +var file_object_grpc_types_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_object_grpc_types_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_object_grpc_types_proto_goTypes = []interface{}{ + (ObjectType)(0), // 0: neo.fs.v2.object.ObjectType + (MatchType)(0), // 1: neo.fs.v2.object.MatchType + (*ShortHeader)(nil), // 2: neo.fs.v2.object.ShortHeader + (*Header)(nil), // 3: neo.fs.v2.object.Header + (*Object)(nil), // 4: neo.fs.v2.object.Object + (*SplitInfo)(nil), // 5: neo.fs.v2.object.SplitInfo + (*Header_Attribute)(nil), // 6: neo.fs.v2.object.Header.Attribute + (*Header_Split)(nil), // 7: neo.fs.v2.object.Header.Split + (*refs.Version)(nil), // 8: neo.fs.v2.refs.Version + (*refs.OwnerID)(nil), // 9: neo.fs.v2.refs.OwnerID + (*refs.Checksum)(nil), // 10: neo.fs.v2.refs.Checksum + (*refs.ContainerID)(nil), // 11: neo.fs.v2.refs.ContainerID + (*session.SessionToken)(nil), // 12: neo.fs.v2.session.SessionToken + (*refs.ObjectID)(nil), // 13: neo.fs.v2.refs.ObjectID + (*refs.Signature)(nil), // 14: neo.fs.v2.refs.Signature +} +var file_object_grpc_types_proto_depIdxs = []int32{ + 8, // 0: neo.fs.v2.object.ShortHeader.version:type_name -> neo.fs.v2.refs.Version + 9, // 1: neo.fs.v2.object.ShortHeader.owner_id:type_name -> neo.fs.v2.refs.OwnerID + 0, // 2: neo.fs.v2.object.ShortHeader.object_type:type_name -> neo.fs.v2.object.ObjectType + 10, // 3: neo.fs.v2.object.ShortHeader.payload_hash:type_name -> neo.fs.v2.refs.Checksum + 10, // 4: neo.fs.v2.object.ShortHeader.homomorphic_hash:type_name -> neo.fs.v2.refs.Checksum + 8, // 5: neo.fs.v2.object.Header.version:type_name 
-> neo.fs.v2.refs.Version + 11, // 6: neo.fs.v2.object.Header.container_id:type_name -> neo.fs.v2.refs.ContainerID + 9, // 7: neo.fs.v2.object.Header.owner_id:type_name -> neo.fs.v2.refs.OwnerID + 10, // 8: neo.fs.v2.object.Header.payload_hash:type_name -> neo.fs.v2.refs.Checksum + 0, // 9: neo.fs.v2.object.Header.object_type:type_name -> neo.fs.v2.object.ObjectType + 10, // 10: neo.fs.v2.object.Header.homomorphic_hash:type_name -> neo.fs.v2.refs.Checksum + 12, // 11: neo.fs.v2.object.Header.session_token:type_name -> neo.fs.v2.session.SessionToken + 6, // 12: neo.fs.v2.object.Header.attributes:type_name -> neo.fs.v2.object.Header.Attribute + 7, // 13: neo.fs.v2.object.Header.split:type_name -> neo.fs.v2.object.Header.Split + 13, // 14: neo.fs.v2.object.Object.object_id:type_name -> neo.fs.v2.refs.ObjectID + 14, // 15: neo.fs.v2.object.Object.signature:type_name -> neo.fs.v2.refs.Signature + 3, // 16: neo.fs.v2.object.Object.header:type_name -> neo.fs.v2.object.Header + 13, // 17: neo.fs.v2.object.SplitInfo.last_part:type_name -> neo.fs.v2.refs.ObjectID + 13, // 18: neo.fs.v2.object.SplitInfo.link:type_name -> neo.fs.v2.refs.ObjectID + 13, // 19: neo.fs.v2.object.SplitInfo.first_part:type_name -> neo.fs.v2.refs.ObjectID + 13, // 20: neo.fs.v2.object.Header.Split.parent:type_name -> neo.fs.v2.refs.ObjectID + 13, // 21: neo.fs.v2.object.Header.Split.previous:type_name -> neo.fs.v2.refs.ObjectID + 14, // 22: neo.fs.v2.object.Header.Split.parent_signature:type_name -> neo.fs.v2.refs.Signature + 3, // 23: neo.fs.v2.object.Header.Split.parent_header:type_name -> neo.fs.v2.object.Header + 13, // 24: neo.fs.v2.object.Header.Split.children:type_name -> neo.fs.v2.refs.ObjectID + 13, // 25: neo.fs.v2.object.Header.Split.first:type_name -> neo.fs.v2.refs.ObjectID + 26, // [26:26] is the sub-list for method output_type + 26, // [26:26] is the sub-list for method input_type + 26, // [26:26] is the sub-list for extension type_name + 26, // [26:26] is the sub-list for extension 
extendee + 0, // [0:26] is the sub-list for field type_name +} + +func init() { file_object_grpc_types_proto_init() } +func file_object_grpc_types_proto_init() { + if File_object_grpc_types_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_object_grpc_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShortHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Header); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Object); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_types_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SplitInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_types_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Header_Attribute); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_object_grpc_types_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Header_Split); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_object_grpc_types_proto_rawDesc, + NumEnums: 
2, + NumMessages: 6, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_object_grpc_types_proto_goTypes, + DependencyIndexes: file_object_grpc_types_proto_depIdxs, + EnumInfos: file_object_grpc_types_proto_enumTypes, + MessageInfos: file_object_grpc_types_proto_msgTypes, + }.Build() + File_object_grpc_types_proto = out.File + file_object_grpc_types_proto_rawDesc = nil + file_object_grpc_types_proto_goTypes = nil + file_object_grpc_types_proto_depIdxs = nil +} diff --git a/api/refs/encoding.go b/api/refs/encoding.go new file mode 100644 index 000000000..e9d0f6ec1 --- /dev/null +++ b/api/refs/encoding.go @@ -0,0 +1,192 @@ +package refs + +import "github.com/nspcc-dev/neofs-sdk-go/internal/proto" + +const ( + _ = iota + fieldOwnerIDValue +) + +func (x *OwnerID) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeBytes(fieldOwnerIDValue, x.Value) + } + return sz +} + +func (x *OwnerID) MarshalStable(b []byte) { + if x != nil { + proto.MarshalBytes(b, fieldOwnerIDValue, x.Value) + } +} + +const ( + _ = iota + fieldVersionMajor + fieldVersionMinor +) + +func (x *Version) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeVarint(fieldVersionMajor, x.Major) + + proto.SizeVarint(fieldVersionMinor, x.Minor) + } + return sz +} + +func (x *Version) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalVarint(b, fieldVersionMajor, x.Major) + proto.MarshalVarint(b[off:], fieldVersionMinor, x.Minor) + } +} + +const ( + _ = iota + fieldContainerIDValue +) + +func (x *ContainerID) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeBytes(fieldContainerIDValue, x.Value) + } + return sz +} + +func (x *ContainerID) MarshalStable(b []byte) { + if x != nil { + proto.MarshalBytes(b, fieldContainerIDValue, x.Value) + } +} + +const ( + _ = iota + fieldObjectIDValue +) + +func (x *ObjectID) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeBytes(fieldObjectIDValue, x.Value) + } + return sz +} + +func (x 
*ObjectID) MarshalStable(b []byte) { + if x != nil { + proto.MarshalBytes(b, fieldObjectIDValue, x.Value) + } +} + +const ( + _ = iota + fieldAddressContainer + fieldAddressObject +) + +func (x *Address) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldAddressContainer, x.ContainerId) + + proto.SizeNested(fieldAddressObject, x.ObjectId) + } + return sz +} + +func (x *Address) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalNested(b, fieldAddressContainer, x.ContainerId) + proto.MarshalNested(b[off:], fieldAddressObject, x.ObjectId) + } +} + +const ( + _ = iota + fieldSubnetVal +) + +func (x *SubnetID) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeFixed32(fieldSubnetVal, x.Value) + } + return sz +} + +func (x *SubnetID) MarshalStable(b []byte) { + if x != nil { + proto.MarshalFixed32(b, fieldSubnetVal, x.Value) + } +} + +const ( + _ = iota + fieldSignatureKey + fieldSignatureVal + fieldSignatureScheme +) + +func (x *Signature) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeBytes(fieldSignatureKey, x.Key) + + proto.SizeBytes(fieldSignatureVal, x.Sign) + + proto.SizeVarint(fieldSignatureScheme, int32(x.Scheme)) + } + return sz +} + +func (x *Signature) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalBytes(b, fieldSignatureKey, x.Key) + off += proto.MarshalBytes(b[off:], fieldSignatureVal, x.Sign) + proto.MarshalVarint(b[off:], fieldSignatureScheme, int32(x.Scheme)) + } +} + +const ( + _ = iota + fieldSigRFC6979Key + fieldSigRFC6979Val +) + +func (x *SignatureRFC6979) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeBytes(fieldSigRFC6979Key, x.Key) + + proto.SizeBytes(fieldSigRFC6979Val, x.Sign) + } + return sz +} + +func (x *SignatureRFC6979) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalBytes(b, fieldSigRFC6979Key, x.Key) + proto.MarshalBytes(b[off:], fieldSigRFC6979Val, x.Sign) + } +} + +const ( + _ = iota + fieldChecksumType 
+ fieldChecksumValue +) + +func (x *Checksum) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeVarint(fieldChecksumType, int32(x.Type)) + + proto.SizeBytes(fieldChecksumValue, x.Sum) + } + return sz +} + +func (x *Checksum) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalVarint(b, fieldChecksumType, int32(x.Type)) + proto.MarshalBytes(b[off:], fieldChecksumValue, x.Sum) + } +} diff --git a/api/refs/encoding_test.go b/api/refs/encoding_test.go new file mode 100644 index 000000000..765a2b870 --- /dev/null +++ b/api/refs/encoding_test.go @@ -0,0 +1,165 @@ +package refs_test + +import ( + "testing" + + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" +) + +func TestOwnerID(t *testing.T) { + v := &refs.OwnerID{ + Value: []byte("any_owner"), + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res refs.OwnerID + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.Value, res.Value) +} + +func TestVersion(t *testing.T) { + v := &refs.Version{ + Major: 123, + Minor: 456, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res refs.Version + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.EqualValues(t, v.Major, res.Major) + require.EqualValues(t, v.Minor, res.Minor) +} + +func TestContainerID(t *testing.T) { + v := &refs.ContainerID{ + Value: []byte("any_container"), + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res refs.ContainerID + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.Value, res.Value) +} + +func TestObjectID(t *testing.T) { + v := &refs.ObjectID{ + Value: []byte("any_object"), + } + + sz := v.MarshaledSize() + b := 
make([]byte, sz) + v.MarshalStable(b) + + var res refs.ObjectID + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.Value, res.Value) +} + +func TestAddress(t *testing.T) { + v := &refs.Address{ + ContainerId: &refs.ContainerID{Value: []byte("any_container")}, + ObjectId: &refs.ObjectID{Value: []byte("any_object")}, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res refs.Address + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.ContainerId, res.ContainerId) + require.Equal(t, v.ObjectId, res.ObjectId) +} + +func TestSubnetID(t *testing.T) { + v := &refs.SubnetID{ + Value: 123456, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res refs.SubnetID + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.Value, res.Value) +} + +func TestSignature(t *testing.T) { + v := &refs.Signature{ + Key: []byte("any_key"), + Sign: []byte("any_val"), + Scheme: 123, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res refs.Signature + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.Key, res.Key) + require.Equal(t, v.Sign, res.Sign) + require.Equal(t, v.Scheme, res.Scheme) +} + +func TestSignatureRFC6979(t *testing.T) { + v := &refs.SignatureRFC6979{ + Key: []byte("any_key"), + Sign: []byte("any_val"), + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res refs.SignatureRFC6979 + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.Key, res.Key) + require.Equal(t, v.Sign, res.Sign) +} + +func TestChecksum(t *testing.T) { + v := &refs.Checksum{ + 
Type: 321, + Sum: []byte("any_checksum"), + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res refs.Checksum + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.Type, res.Type) + require.Equal(t, v.Sum, res.Sum) +} diff --git a/api/refs/types.pb.go b/api/refs/types.pb.go new file mode 100644 index 000000000..47c6b918d --- /dev/null +++ b/api/refs/types.pb.go @@ -0,0 +1,920 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.32.0 +// protoc v4.25.1 +// source: refs/grpc/types.proto + +package refs + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Signature scheme describes digital signing scheme used for (key, signature) pair. +type SignatureScheme int32 + +const ( + // ECDSA with SHA-512 hashing (FIPS 186-3) + SignatureScheme_ECDSA_SHA512 SignatureScheme = 0 + // Deterministic ECDSA with SHA-256 hashing (RFC 6979) + SignatureScheme_ECDSA_RFC6979_SHA256 SignatureScheme = 1 + // Deterministic ECDSA with SHA-256 hashing using WalletConnect API. + // Here the algorithm is the same, but the message format differs. + SignatureScheme_ECDSA_RFC6979_SHA256_WALLET_CONNECT SignatureScheme = 2 +) + +// Enum value maps for SignatureScheme. 
+var ( + SignatureScheme_name = map[int32]string{ + 0: "ECDSA_SHA512", + 1: "ECDSA_RFC6979_SHA256", + 2: "ECDSA_RFC6979_SHA256_WALLET_CONNECT", + } + SignatureScheme_value = map[string]int32{ + "ECDSA_SHA512": 0, + "ECDSA_RFC6979_SHA256": 1, + "ECDSA_RFC6979_SHA256_WALLET_CONNECT": 2, + } +) + +func (x SignatureScheme) Enum() *SignatureScheme { + p := new(SignatureScheme) + *p = x + return p +} + +func (x SignatureScheme) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SignatureScheme) Descriptor() protoreflect.EnumDescriptor { + return file_refs_grpc_types_proto_enumTypes[0].Descriptor() +} + +func (SignatureScheme) Type() protoreflect.EnumType { + return &file_refs_grpc_types_proto_enumTypes[0] +} + +func (x SignatureScheme) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SignatureScheme.Descriptor instead. +func (SignatureScheme) EnumDescriptor() ([]byte, []int) { + return file_refs_grpc_types_proto_rawDescGZIP(), []int{0} +} + +// Checksum algorithm type. +type ChecksumType int32 + +const ( + // Unknown. Not used + ChecksumType_CHECKSUM_TYPE_UNSPECIFIED ChecksumType = 0 + // Tillich-Zemor homomorphic hash function + ChecksumType_TZ ChecksumType = 1 + // SHA-256 + ChecksumType_SHA256 ChecksumType = 2 +) + +// Enum value maps for ChecksumType. 
+var ( + ChecksumType_name = map[int32]string{ + 0: "CHECKSUM_TYPE_UNSPECIFIED", + 1: "TZ", + 2: "SHA256", + } + ChecksumType_value = map[string]int32{ + "CHECKSUM_TYPE_UNSPECIFIED": 0, + "TZ": 1, + "SHA256": 2, + } +) + +func (x ChecksumType) Enum() *ChecksumType { + p := new(ChecksumType) + *p = x + return p +} + +func (x ChecksumType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ChecksumType) Descriptor() protoreflect.EnumDescriptor { + return file_refs_grpc_types_proto_enumTypes[1].Descriptor() +} + +func (ChecksumType) Type() protoreflect.EnumType { + return &file_refs_grpc_types_proto_enumTypes[1] +} + +func (x ChecksumType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ChecksumType.Descriptor instead. +func (ChecksumType) EnumDescriptor() ([]byte, []int) { + return file_refs_grpc_types_proto_rawDescGZIP(), []int{1} +} + +// Objects in NeoFS are addressed by their ContainerID and ObjectID. +// +// String presentation of `Address` is a concatenation of string encoded +// `ContainerID` and `ObjectID` delimited by '/' character. 
+type Address struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Container identifier + ContainerId *ContainerID `protobuf:"bytes,1,opt,name=container_id,json=containerID,proto3" json:"container_id,omitempty"` + // Object identifier + ObjectId *ObjectID `protobuf:"bytes,2,opt,name=object_id,json=objectID,proto3" json:"object_id,omitempty"` +} + +func (x *Address) Reset() { + *x = Address{} + if protoimpl.UnsafeEnabled { + mi := &file_refs_grpc_types_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Address) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Address) ProtoMessage() {} + +func (x *Address) ProtoReflect() protoreflect.Message { + mi := &file_refs_grpc_types_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Address.ProtoReflect.Descriptor instead. +func (*Address) Descriptor() ([]byte, []int) { + return file_refs_grpc_types_proto_rawDescGZIP(), []int{0} +} + +func (x *Address) GetContainerId() *ContainerID { + if x != nil { + return x.ContainerId + } + return nil +} + +func (x *Address) GetObjectId() *ObjectID { + if x != nil { + return x.ObjectId + } + return nil +} + +// NeoFS Object unique identifier. Objects are immutable and content-addressed. +// It means `ObjectID` will change if the `header` or the `payload` changes. +// +// `ObjectID` is a 32 byte long +// [SHA256](https://csrc.nist.gov/publications/detail/fips/180/4/final) hash of +// the object's `header` field, which, in it's turn, contains the hash of the object's +// payload. +// +// String presentation is a +// [base58](https://tools.ietf.org/html/draft-msporny-base58-02) encoded string. 
+// +// JSON value will be data encoded as a string using standard base64 +// encoding with paddings. Either +// [standard](https://tools.ietf.org/html/rfc4648#section-4) or +// [URL-safe](https://tools.ietf.org/html/rfc4648#section-5) base64 encoding +// with/without paddings are accepted. +type ObjectID struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Object identifier in a binary format + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *ObjectID) Reset() { + *x = ObjectID{} + if protoimpl.UnsafeEnabled { + mi := &file_refs_grpc_types_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ObjectID) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ObjectID) ProtoMessage() {} + +func (x *ObjectID) ProtoReflect() protoreflect.Message { + mi := &file_refs_grpc_types_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ObjectID.ProtoReflect.Descriptor instead. +func (*ObjectID) Descriptor() ([]byte, []int) { + return file_refs_grpc_types_proto_rawDescGZIP(), []int{1} +} + +func (x *ObjectID) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +// NeoFS container identifier. Container structures are immutable and +// content-addressed. +// +// `ContainerID` is a 32 byte long +// [SHA256](https://csrc.nist.gov/publications/detail/fips/180/4/final) hash of +// stable-marshalled container message. +// +// String presentation is a +// [base58](https://tools.ietf.org/html/draft-msporny-base58-02) encoded string. +// +// JSON value will be data encoded as a string using standard base64 +// encoding with paddings. 
Either +// [standard](https://tools.ietf.org/html/rfc4648#section-4) or +// [URL-safe](https://tools.ietf.org/html/rfc4648#section-5) base64 encoding +// with/without paddings are accepted. +type ContainerID struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Container identifier in a binary format. + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *ContainerID) Reset() { + *x = ContainerID{} + if protoimpl.UnsafeEnabled { + mi := &file_refs_grpc_types_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ContainerID) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ContainerID) ProtoMessage() {} + +func (x *ContainerID) ProtoReflect() protoreflect.Message { + mi := &file_refs_grpc_types_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ContainerID.ProtoReflect.Descriptor instead. +func (*ContainerID) Descriptor() ([]byte, []int) { + return file_refs_grpc_types_proto_rawDescGZIP(), []int{2} +} + +func (x *ContainerID) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +// `OwnerID` is a derivative of a user's main public key. The transformation +// algorithm is the same as for Neo3 wallet addresses. Neo3 wallet address can +// be directly used as `OwnerID`. +// +// `OwnerID` is a 25 bytes sequence starting with Neo version prefix byte +// followed by 20 bytes of ScrptHash and 4 bytes of checksum. +// +// String presentation is a [Base58 +// Check](https://en.bitcoin.it/wiki/Base58Check_encoding) Encoded string. +// +// JSON value will be data encoded as a string using standard base64 +// encoding with paddings. 
Either +// [standard](https://tools.ietf.org/html/rfc4648#section-4) or +// [URL-safe](https://tools.ietf.org/html/rfc4648#section-5) base64 encoding +// with/without paddings are accepted. +type OwnerID struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifier of the container owner in a binary format + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *OwnerID) Reset() { + *x = OwnerID{} + if protoimpl.UnsafeEnabled { + mi := &file_refs_grpc_types_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OwnerID) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OwnerID) ProtoMessage() {} + +func (x *OwnerID) ProtoReflect() protoreflect.Message { + mi := &file_refs_grpc_types_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OwnerID.ProtoReflect.Descriptor instead. +func (*OwnerID) Descriptor() ([]byte, []int) { + return file_refs_grpc_types_proto_rawDescGZIP(), []int{3} +} + +func (x *OwnerID) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +// NeoFS subnetwork identifier. +// +// String representation of a value is base-10 integer. +// +// JSON representation is an object containing a single `value` number field. +// +// DEPRECATED. Kept for compatibility only. +type SubnetID struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // 4-byte integer subnetwork identifier. 
+ Value uint32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *SubnetID) Reset() { + *x = SubnetID{} + if protoimpl.UnsafeEnabled { + mi := &file_refs_grpc_types_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubnetID) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubnetID) ProtoMessage() {} + +func (x *SubnetID) ProtoReflect() protoreflect.Message { + mi := &file_refs_grpc_types_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubnetID.ProtoReflect.Descriptor instead. +func (*SubnetID) Descriptor() ([]byte, []int) { + return file_refs_grpc_types_proto_rawDescGZIP(), []int{4} +} + +func (x *SubnetID) GetValue() uint32 { + if x != nil { + return x.Value + } + return 0 +} + +// API version used by a node. +// +// String presentation is a Semantic Versioning 2.0.0 compatible version string +// with 'v' prefix. i.e. `vX.Y`, where `X` is the major number, `Y` is the minor number. 
+type Version struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Major API version + Major uint32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` + // Minor API version + Minor uint32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` +} + +func (x *Version) Reset() { + *x = Version{} + if protoimpl.UnsafeEnabled { + mi := &file_refs_grpc_types_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Version) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Version) ProtoMessage() {} + +func (x *Version) ProtoReflect() protoreflect.Message { + mi := &file_refs_grpc_types_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Version.ProtoReflect.Descriptor instead. +func (*Version) Descriptor() ([]byte, []int) { + return file_refs_grpc_types_proto_rawDescGZIP(), []int{5} +} + +func (x *Version) GetMajor() uint32 { + if x != nil { + return x.Major + } + return 0 +} + +func (x *Version) GetMinor() uint32 { + if x != nil { + return x.Minor + } + return 0 +} + +// Signature of something in NeoFS. 
+type Signature struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Public key used for signing + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // Signature + Sign []byte `protobuf:"bytes,2,opt,name=sign,json=signature,proto3" json:"sign,omitempty"` + // Scheme contains digital signature scheme identifier + Scheme SignatureScheme `protobuf:"varint,3,opt,name=scheme,proto3,enum=neo.fs.v2.refs.SignatureScheme" json:"scheme,omitempty"` +} + +func (x *Signature) Reset() { + *x = Signature{} + if protoimpl.UnsafeEnabled { + mi := &file_refs_grpc_types_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Signature) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Signature) ProtoMessage() {} + +func (x *Signature) ProtoReflect() protoreflect.Message { + mi := &file_refs_grpc_types_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Signature.ProtoReflect.Descriptor instead. +func (*Signature) Descriptor() ([]byte, []int) { + return file_refs_grpc_types_proto_rawDescGZIP(), []int{6} +} + +func (x *Signature) GetKey() []byte { + if x != nil { + return x.Key + } + return nil +} + +func (x *Signature) GetSign() []byte { + if x != nil { + return x.Sign + } + return nil +} + +func (x *Signature) GetScheme() SignatureScheme { + if x != nil { + return x.Scheme + } + return SignatureScheme_ECDSA_SHA512 +} + +// RFC 6979 signature. 
+type SignatureRFC6979 struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Public key used for signing + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // Deterministic ECDSA with SHA-256 hashing + Sign []byte `protobuf:"bytes,2,opt,name=sign,json=signature,proto3" json:"sign,omitempty"` +} + +func (x *SignatureRFC6979) Reset() { + *x = SignatureRFC6979{} + if protoimpl.UnsafeEnabled { + mi := &file_refs_grpc_types_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SignatureRFC6979) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SignatureRFC6979) ProtoMessage() {} + +func (x *SignatureRFC6979) ProtoReflect() protoreflect.Message { + mi := &file_refs_grpc_types_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SignatureRFC6979.ProtoReflect.Descriptor instead. +func (*SignatureRFC6979) Descriptor() ([]byte, []int) { + return file_refs_grpc_types_proto_rawDescGZIP(), []int{7} +} + +func (x *SignatureRFC6979) GetKey() []byte { + if x != nil { + return x.Key + } + return nil +} + +func (x *SignatureRFC6979) GetSign() []byte { + if x != nil { + return x.Sign + } + return nil +} + +// Checksum message. 
+// Depending on checksum algorithm type, the string presentation may vary: +// +// - TZ \ +// Hex encoded string without `0x` prefix +// - SHA256 \ +// Hex encoded string without `0x` prefix +type Checksum struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Checksum algorithm type + Type ChecksumType `protobuf:"varint,1,opt,name=type,proto3,enum=neo.fs.v2.refs.ChecksumType" json:"type,omitempty"` + // Checksum itself + Sum []byte `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"` +} + +func (x *Checksum) Reset() { + *x = Checksum{} + if protoimpl.UnsafeEnabled { + mi := &file_refs_grpc_types_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Checksum) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Checksum) ProtoMessage() {} + +func (x *Checksum) ProtoReflect() protoreflect.Message { + mi := &file_refs_grpc_types_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Checksum.ProtoReflect.Descriptor instead. 
+func (*Checksum) Descriptor() ([]byte, []int) { + return file_refs_grpc_types_proto_rawDescGZIP(), []int{8} +} + +func (x *Checksum) GetType() ChecksumType { + if x != nil { + return x.Type + } + return ChecksumType_CHECKSUM_TYPE_UNSPECIFIED +} + +func (x *Checksum) GetSum() []byte { + if x != nil { + return x.Sum + } + return nil +} + +var File_refs_grpc_types_proto protoreflect.FileDescriptor + +var file_refs_grpc_types_proto_rawDesc = []byte{ + 0x0a, 0x15, 0x72, 0x65, 0x66, 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, + 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x22, 0x80, 0x01, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x12, 0x3e, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, + 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x49, 0x44, 0x12, 0x35, 0x0a, 0x09, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, + 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x44, + 0x52, 0x08, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x44, 0x22, 0x20, 0x0a, 0x08, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23, 0x0a, 0x0b, + 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0x1f, 0x0a, 0x07, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x44, 
0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x22, 0x20, 0x0a, 0x08, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x49, 0x44, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x07, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x22, 0x35, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, + 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x22, 0x6f, 0x0a, 0x09, 0x53, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x17, 0x0a, 0x04, 0x73, 0x69, + 0x67, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x12, 0x37, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, + 0x72, 0x65, 0x66, 0x73, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x65, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x22, 0x3d, 0x0a, 0x10, + 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x46, 0x43, 0x36, 0x39, 0x37, 0x39, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x17, 0x0a, 0x04, 0x73, 0x69, 0x67, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x4e, 0x0a, 0x08, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x12, 0x30, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 
0x72, 0x65, 0x66, 0x73, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x75, 0x6d, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x73, 0x75, 0x6d, 0x2a, 0x66, 0x0a, 0x0f, 0x53, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x12, 0x10, + 0x0a, 0x0c, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x00, + 0x12, 0x18, 0x0a, 0x14, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x52, 0x46, 0x43, 0x36, 0x39, 0x37, + 0x39, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x27, 0x0a, 0x23, 0x45, 0x43, + 0x44, 0x53, 0x41, 0x5f, 0x52, 0x46, 0x43, 0x36, 0x39, 0x37, 0x39, 0x5f, 0x53, 0x48, 0x41, 0x32, + 0x35, 0x36, 0x5f, 0x57, 0x41, 0x4c, 0x4c, 0x45, 0x54, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, + 0x54, 0x10, 0x02, 0x2a, 0x41, 0x0a, 0x0c, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x19, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x53, 0x55, 0x4d, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x54, 0x5a, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x48, + 0x41, 0x32, 0x35, 0x36, 0x10, 0x02, 0x42, 0x50, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6e, 0x73, 0x70, 0x63, 0x63, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x6e, + 0x65, 0x6f, 0x66, 0x73, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x72, + 0x65, 0x66, 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x3b, 0x72, 0x65, 0x66, 0x73, 0xaa, 0x02, 0x18, + 0x4e, 0x65, 0x6f, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x41, 0x50, 0x49, 0x2e, 0x52, 0x65, 0x66, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_refs_grpc_types_proto_rawDescOnce sync.Once + file_refs_grpc_types_proto_rawDescData = file_refs_grpc_types_proto_rawDesc +) + +func 
file_refs_grpc_types_proto_rawDescGZIP() []byte { + file_refs_grpc_types_proto_rawDescOnce.Do(func() { + file_refs_grpc_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_refs_grpc_types_proto_rawDescData) + }) + return file_refs_grpc_types_proto_rawDescData +} + +var file_refs_grpc_types_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_refs_grpc_types_proto_msgTypes = make([]protoimpl.MessageInfo, 9) +var file_refs_grpc_types_proto_goTypes = []interface{}{ + (SignatureScheme)(0), // 0: neo.fs.v2.refs.SignatureScheme + (ChecksumType)(0), // 1: neo.fs.v2.refs.ChecksumType + (*Address)(nil), // 2: neo.fs.v2.refs.Address + (*ObjectID)(nil), // 3: neo.fs.v2.refs.ObjectID + (*ContainerID)(nil), // 4: neo.fs.v2.refs.ContainerID + (*OwnerID)(nil), // 5: neo.fs.v2.refs.OwnerID + (*SubnetID)(nil), // 6: neo.fs.v2.refs.SubnetID + (*Version)(nil), // 7: neo.fs.v2.refs.Version + (*Signature)(nil), // 8: neo.fs.v2.refs.Signature + (*SignatureRFC6979)(nil), // 9: neo.fs.v2.refs.SignatureRFC6979 + (*Checksum)(nil), // 10: neo.fs.v2.refs.Checksum +} +var file_refs_grpc_types_proto_depIdxs = []int32{ + 4, // 0: neo.fs.v2.refs.Address.container_id:type_name -> neo.fs.v2.refs.ContainerID + 3, // 1: neo.fs.v2.refs.Address.object_id:type_name -> neo.fs.v2.refs.ObjectID + 0, // 2: neo.fs.v2.refs.Signature.scheme:type_name -> neo.fs.v2.refs.SignatureScheme + 1, // 3: neo.fs.v2.refs.Checksum.type:type_name -> neo.fs.v2.refs.ChecksumType + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_refs_grpc_types_proto_init() } +func file_refs_grpc_types_proto_init() { + if File_refs_grpc_types_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_refs_grpc_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch 
v := v.(*Address); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_refs_grpc_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ObjectID); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_refs_grpc_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ContainerID); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_refs_grpc_types_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OwnerID); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_refs_grpc_types_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubnetID); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_refs_grpc_types_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Version); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_refs_grpc_types_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Signature); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_refs_grpc_types_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SignatureRFC6979); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_refs_grpc_types_proto_msgTypes[8].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*Checksum); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_refs_grpc_types_proto_rawDesc, + NumEnums: 2, + NumMessages: 9, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_refs_grpc_types_proto_goTypes, + DependencyIndexes: file_refs_grpc_types_proto_depIdxs, + EnumInfos: file_refs_grpc_types_proto_enumTypes, + MessageInfos: file_refs_grpc_types_proto_msgTypes, + }.Build() + File_refs_grpc_types_proto = out.File + file_refs_grpc_types_proto_rawDesc = nil + file_refs_grpc_types_proto_goTypes = nil + file_refs_grpc_types_proto_depIdxs = nil +} diff --git a/api/reputation/encoding.go b/api/reputation/encoding.go new file mode 100644 index 000000000..2cfcb57d3 --- /dev/null +++ b/api/reputation/encoding.go @@ -0,0 +1,145 @@ +package reputation + +import "github.com/nspcc-dev/neofs-sdk-go/internal/proto" + +const ( + _ = iota + fieldPeerPubKey +) + +func (x *PeerID) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeBytes(fieldPeerPubKey, x.PublicKey) + } + return sz +} + +func (x *PeerID) MarshalStable(b []byte) { + if x != nil { + proto.MarshalBytes(b, fieldPeerPubKey, x.PublicKey) + } +} + +const ( + _ = iota + fieldTrustPeer + fieldTrustValue +) + +func (x *Trust) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldTrustPeer, x.Peer) + + proto.SizeFloat64(fieldTrustValue, x.Value) + } + return sz +} + +func (x *Trust) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalNested(b, fieldTrustPeer, x.Peer) + proto.MarshalFloat64(b[off:], fieldTrustValue, x.Value) + } +} + +const ( + _ = iota + fieldP2PTrustPeer + fieldP2PTrustValue +) + +func (x *PeerToPeerTrust) MarshaledSize() int { + var sz int + if x != nil { + sz 
= proto.SizeNested(fieldP2PTrustPeer, x.TrustingPeer) + + proto.SizeNested(fieldP2PTrustValue, x.Trust) + } + return sz +} + +func (x *PeerToPeerTrust) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalNested(b, fieldP2PTrustPeer, x.TrustingPeer) + proto.MarshalNested(b[off:], fieldP2PTrustValue, x.Trust) + } +} + +const ( + _ = iota + fieldGlobalTrustBodyManager + fieldGlobalTrustBodyValue +) + +func (x *GlobalTrust_Body) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldGlobalTrustBodyManager, x.Manager) + + proto.SizeNested(fieldGlobalTrustBodyValue, x.Trust) + } + return sz +} + +func (x *GlobalTrust_Body) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalNested(b, fieldGlobalTrustBodyManager, x.Manager) + proto.MarshalNested(b[off:], fieldGlobalTrustBodyValue, x.Trust) + } +} + +const ( + _ = iota + fieldAnnounceLocalReqEpoch + fieldAnnounceLocalReqTrusts +) + +func (x *AnnounceLocalTrustRequest_Body) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeVarint(fieldAnnounceLocalReqEpoch, x.Epoch) + for i := range x.Trusts { + sz += proto.SizeNested(fieldAnnounceLocalReqTrusts, x.Trusts[i]) + } + } + return sz +} + +func (x *AnnounceLocalTrustRequest_Body) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalVarint(b, fieldAnnounceLocalReqEpoch, x.Epoch) + for i := range x.Trusts { + off += proto.MarshalNested(b[off:], fieldAnnounceLocalReqTrusts, x.Trusts[i]) + } + } +} + +func (x *AnnounceLocalTrustResponse_Body) MarshaledSize() int { return 0 } +func (x *AnnounceLocalTrustResponse_Body) MarshalStable([]byte) {} + +const ( + _ = iota + fieldAnnounceIntermediateReqEpoch + fieldAnnounceIntermediateReqIter + fieldAnnounceIntermediateReqTrust +) + +func (x *AnnounceIntermediateResultRequest_Body) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeVarint(fieldAnnounceIntermediateReqEpoch, x.Epoch) + + proto.SizeVarint(fieldAnnounceIntermediateReqIter, x.Iteration) 
+ + proto.SizeNested(fieldAnnounceIntermediateReqTrust, x.Trust) + } + return sz +} + +func (x *AnnounceIntermediateResultRequest_Body) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalVarint(b, fieldAnnounceIntermediateReqEpoch, x.Epoch) + off += proto.MarshalVarint(b[off:], fieldAnnounceIntermediateReqIter, x.Iteration) + proto.MarshalNested(b[off:], fieldAnnounceIntermediateReqTrust, x.Trust) + } +} + +func (x *AnnounceIntermediateResultResponse_Body) MarshaledSize() int { return 0 } +func (x *AnnounceIntermediateResultResponse_Body) MarshalStable([]byte) {} diff --git a/api/reputation/encoding_test.go b/api/reputation/encoding_test.go new file mode 100644 index 000000000..455f4070f --- /dev/null +++ b/api/reputation/encoding_test.go @@ -0,0 +1,95 @@ +package reputation_test + +import ( + "testing" + + "github.com/nspcc-dev/neofs-sdk-go/api/reputation" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" +) + +func TestGlobalTrust_Body(t *testing.T) { + v := &reputation.GlobalTrust_Body{ + Manager: &reputation.PeerID{PublicKey: []byte("any_manager_key")}, + Trust: &reputation.Trust{ + Peer: &reputation.PeerID{PublicKey: []byte("any_peer_key")}, + Value: 0.5, + }, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res reputation.GlobalTrust_Body + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.Manager, res.Manager) + require.Equal(t, v.Trust, res.Trust) +} + +func TestAnnounceLocalTrustRequest_Body(t *testing.T) { + v := &reputation.AnnounceLocalTrustRequest_Body{ + Epoch: 1, + Trusts: []*reputation.Trust{ + {Peer: &reputation.PeerID{PublicKey: []byte("any_public_key1")}, Value: 2.3}, + {Peer: &reputation.PeerID{PublicKey: []byte("any_public_key2")}, Value: 3.4}, + }, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res reputation.AnnounceLocalTrustRequest_Body + err := 
proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.Epoch, res.Epoch) + require.Equal(t, v.Trusts, res.Trusts) +} + +func TestAnnounceLocalTrustResponse_Body(t *testing.T) { + var v reputation.AnnounceLocalTrustResponse_Body + require.Zero(t, v.MarshaledSize()) + require.NotPanics(t, func() { v.MarshalStable(nil) }) + b := []byte("not_a_protobuf") + v.MarshalStable(b) + require.EqualValues(t, "not_a_protobuf", b) +} + +func TestAnnounceIntermediateResultRequest_Body(t *testing.T) { + v := &reputation.AnnounceIntermediateResultRequest_Body{ + Epoch: 1, + Iteration: 2, + Trust: &reputation.PeerToPeerTrust{ + TrustingPeer: &reputation.PeerID{PublicKey: []byte("any_public_key1")}, + Trust: &reputation.Trust{ + Peer: &reputation.PeerID{PublicKey: []byte("any_public_key2")}, + Value: 3.4, + }, + }, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res reputation.AnnounceIntermediateResultRequest_Body + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.Epoch, res.Epoch) + require.Equal(t, v.Iteration, res.Iteration) + require.Equal(t, v.Trust, res.Trust) +} + +func TestAnnounceIntermediateResultResponse_Body(t *testing.T) { + var v reputation.AnnounceIntermediateResultResponse_Body + require.Zero(t, v.MarshaledSize()) + require.NotPanics(t, func() { v.MarshalStable(nil) }) + b := []byte("not_a_protobuf") + v.MarshalStable(b) + require.EqualValues(t, "not_a_protobuf", b) +} diff --git a/api/reputation/service.pb.go b/api/reputation/service.pb.go new file mode 100644 index 000000000..18879dbec --- /dev/null +++ b/api/reputation/service.pb.go @@ -0,0 +1,809 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.32.0 +// protoc v4.25.1 +// source: reputation/grpc/service.proto + +package reputation + +import ( + reflect "reflect" + sync "sync" + + "github.com/nspcc-dev/neofs-sdk-go/api/session" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Announce node's local trust information. +type AnnounceLocalTrustRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of the request message. + Body *AnnounceLocalTrustRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries request meta information. Header data is used only to regulate + // message transport and does not affect request execution. + MetaHeader *session.RequestMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries request verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. 
+ VerifyHeader *session.RequestVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *AnnounceLocalTrustRequest) Reset() { + *x = AnnounceLocalTrustRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_reputation_grpc_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AnnounceLocalTrustRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AnnounceLocalTrustRequest) ProtoMessage() {} + +func (x *AnnounceLocalTrustRequest) ProtoReflect() protoreflect.Message { + mi := &file_reputation_grpc_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AnnounceLocalTrustRequest.ProtoReflect.Descriptor instead. +func (*AnnounceLocalTrustRequest) Descriptor() ([]byte, []int) { + return file_reputation_grpc_service_proto_rawDescGZIP(), []int{0} +} + +func (x *AnnounceLocalTrustRequest) GetBody() *AnnounceLocalTrustRequest_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *AnnounceLocalTrustRequest) GetMetaHeader() *session.RequestMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *AnnounceLocalTrustRequest) GetVerifyHeader() *session.RequestVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// Node's local trust information announcement response. +type AnnounceLocalTrustResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of the response message. + Body *AnnounceLocalTrustResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries response meta information. 
Header data is used only to regulate + // message transport and does not affect request execution. + MetaHeader *session.ResponseMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries response verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. + VerifyHeader *session.ResponseVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *AnnounceLocalTrustResponse) Reset() { + *x = AnnounceLocalTrustResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_reputation_grpc_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AnnounceLocalTrustResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AnnounceLocalTrustResponse) ProtoMessage() {} + +func (x *AnnounceLocalTrustResponse) ProtoReflect() protoreflect.Message { + mi := &file_reputation_grpc_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AnnounceLocalTrustResponse.ProtoReflect.Descriptor instead. 
+func (*AnnounceLocalTrustResponse) Descriptor() ([]byte, []int) { + return file_reputation_grpc_service_proto_rawDescGZIP(), []int{1} +} + +func (x *AnnounceLocalTrustResponse) GetBody() *AnnounceLocalTrustResponse_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *AnnounceLocalTrustResponse) GetMetaHeader() *session.ResponseMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *AnnounceLocalTrustResponse) GetVerifyHeader() *session.ResponseVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// Announce intermediate global trust information. +type AnnounceIntermediateResultRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of the request message. + Body *AnnounceIntermediateResultRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries request meta information. Header data is used only to regulate + // message transport and does not affect request execution. + MetaHeader *session.RequestMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries request verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. 
+ VerifyHeader *session.RequestVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *AnnounceIntermediateResultRequest) Reset() { + *x = AnnounceIntermediateResultRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_reputation_grpc_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AnnounceIntermediateResultRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AnnounceIntermediateResultRequest) ProtoMessage() {} + +func (x *AnnounceIntermediateResultRequest) ProtoReflect() protoreflect.Message { + mi := &file_reputation_grpc_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AnnounceIntermediateResultRequest.ProtoReflect.Descriptor instead. +func (*AnnounceIntermediateResultRequest) Descriptor() ([]byte, []int) { + return file_reputation_grpc_service_proto_rawDescGZIP(), []int{2} +} + +func (x *AnnounceIntermediateResultRequest) GetBody() *AnnounceIntermediateResultRequest_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *AnnounceIntermediateResultRequest) GetMetaHeader() *session.RequestMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *AnnounceIntermediateResultRequest) GetVerifyHeader() *session.RequestVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// Intermediate global trust information announcement response. +type AnnounceIntermediateResultResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of the response message. 
+ Body *AnnounceIntermediateResultResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries response meta information. Header data is used only to regulate + // message transport and does not affect request execution. + MetaHeader *session.ResponseMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries response verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. + VerifyHeader *session.ResponseVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *AnnounceIntermediateResultResponse) Reset() { + *x = AnnounceIntermediateResultResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_reputation_grpc_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AnnounceIntermediateResultResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AnnounceIntermediateResultResponse) ProtoMessage() {} + +func (x *AnnounceIntermediateResultResponse) ProtoReflect() protoreflect.Message { + mi := &file_reputation_grpc_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AnnounceIntermediateResultResponse.ProtoReflect.Descriptor instead. 
+func (*AnnounceIntermediateResultResponse) Descriptor() ([]byte, []int) { + return file_reputation_grpc_service_proto_rawDescGZIP(), []int{3} +} + +func (x *AnnounceIntermediateResultResponse) GetBody() *AnnounceIntermediateResultResponse_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *AnnounceIntermediateResultResponse) GetMetaHeader() *session.ResponseMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *AnnounceIntermediateResultResponse) GetVerifyHeader() *session.ResponseVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// Announce node's local trust information. +type AnnounceLocalTrustRequest_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Trust assessment Epoch number + Epoch uint64 `protobuf:"varint,1,opt,name=epoch,proto3" json:"epoch,omitempty"` + // List of normalized local trust values to other NeoFS nodes. The value + // is calculated according to EigenTrust++ algorithm and must be a + // floating point number in [0;1] range. 
+ Trusts []*Trust `protobuf:"bytes,2,rep,name=trusts,proto3" json:"trusts,omitempty"` +} + +func (x *AnnounceLocalTrustRequest_Body) Reset() { + *x = AnnounceLocalTrustRequest_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_reputation_grpc_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AnnounceLocalTrustRequest_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AnnounceLocalTrustRequest_Body) ProtoMessage() {} + +func (x *AnnounceLocalTrustRequest_Body) ProtoReflect() protoreflect.Message { + mi := &file_reputation_grpc_service_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AnnounceLocalTrustRequest_Body.ProtoReflect.Descriptor instead. +func (*AnnounceLocalTrustRequest_Body) Descriptor() ([]byte, []int) { + return file_reputation_grpc_service_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *AnnounceLocalTrustRequest_Body) GetEpoch() uint64 { + if x != nil { + return x.Epoch + } + return 0 +} + +func (x *AnnounceLocalTrustRequest_Body) GetTrusts() []*Trust { + if x != nil { + return x.Trusts + } + return nil +} + +// Response to the node's local trust information announcement has an empty body +// because the trust exchange operation is asynchronous. If Trust information +// does not pass sanity checks, it is silently ignored. 
+type AnnounceLocalTrustResponse_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *AnnounceLocalTrustResponse_Body) Reset() { + *x = AnnounceLocalTrustResponse_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_reputation_grpc_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AnnounceLocalTrustResponse_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AnnounceLocalTrustResponse_Body) ProtoMessage() {} + +func (x *AnnounceLocalTrustResponse_Body) ProtoReflect() protoreflect.Message { + mi := &file_reputation_grpc_service_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AnnounceLocalTrustResponse_Body.ProtoReflect.Descriptor instead. +func (*AnnounceLocalTrustResponse_Body) Descriptor() ([]byte, []int) { + return file_reputation_grpc_service_proto_rawDescGZIP(), []int{1, 0} +} + +// Announce intermediate global trust information. 
+type AnnounceIntermediateResultRequest_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Iteration execution Epoch number + Epoch uint64 `protobuf:"varint,1,opt,name=epoch,proto3" json:"epoch,omitempty"` + // Iteration sequence number + Iteration uint32 `protobuf:"varint,2,opt,name=iteration,proto3" json:"iteration,omitempty"` + // Current global trust value calculated at the specified iteration + Trust *PeerToPeerTrust `protobuf:"bytes,3,opt,name=trust,proto3" json:"trust,omitempty"` +} + +func (x *AnnounceIntermediateResultRequest_Body) Reset() { + *x = AnnounceIntermediateResultRequest_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_reputation_grpc_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AnnounceIntermediateResultRequest_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AnnounceIntermediateResultRequest_Body) ProtoMessage() {} + +func (x *AnnounceIntermediateResultRequest_Body) ProtoReflect() protoreflect.Message { + mi := &file_reputation_grpc_service_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AnnounceIntermediateResultRequest_Body.ProtoReflect.Descriptor instead. 
+func (*AnnounceIntermediateResultRequest_Body) Descriptor() ([]byte, []int) { + return file_reputation_grpc_service_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *AnnounceIntermediateResultRequest_Body) GetEpoch() uint64 { + if x != nil { + return x.Epoch + } + return 0 +} + +func (x *AnnounceIntermediateResultRequest_Body) GetIteration() uint32 { + if x != nil { + return x.Iteration + } + return 0 +} + +func (x *AnnounceIntermediateResultRequest_Body) GetTrust() *PeerToPeerTrust { + if x != nil { + return x.Trust + } + return nil +} + +// Response to the node's intermediate global trust information announcement has +// an empty body because the trust exchange operation is asynchronous. If +// Trust information does not pass sanity checks, it is silently ignored. +type AnnounceIntermediateResultResponse_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *AnnounceIntermediateResultResponse_Body) Reset() { + *x = AnnounceIntermediateResultResponse_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_reputation_grpc_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AnnounceIntermediateResultResponse_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AnnounceIntermediateResultResponse_Body) ProtoMessage() {} + +func (x *AnnounceIntermediateResultResponse_Body) ProtoReflect() protoreflect.Message { + mi := &file_reputation_grpc_service_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AnnounceIntermediateResultResponse_Body.ProtoReflect.Descriptor instead. 
+func (*AnnounceIntermediateResultResponse_Body) Descriptor() ([]byte, []int) { + return file_reputation_grpc_service_proto_rawDescGZIP(), []int{3, 0} +} + +var File_reputation_grpc_service_proto protoreflect.FileDescriptor + +var file_reputation_grpc_service_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x72, 0x65, 0x70, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, + 0x63, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x14, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x70, 0x75, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x1b, 0x72, 0x65, 0x70, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x18, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd2, 0x02, 0x0a, + 0x19, 0x41, 0x6e, 0x6e, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x54, 0x72, + 0x75, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x48, 0x0a, 0x04, 0x62, 0x6f, + 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, + 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x70, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x41, 0x6e, 0x6e, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x54, 0x72, 0x75, + 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, + 0x62, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, + 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, + 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 
0x64, 0x65, 0x72, 0x12, 0x51, 0x0a, 0x0d, 0x76, + 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x51, + 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x33, 0x0a, 0x06, + 0x74, 0x72, 0x75, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6e, + 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x70, 0x75, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x52, 0x06, 0x74, 0x72, 0x75, 0x73, 0x74, + 0x73, 0x22, 0x8b, 0x02, 0x0a, 0x1a, 0x41, 0x6e, 0x6e, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x4c, 0x6f, + 0x63, 0x61, 0x6c, 0x54, 0x72, 0x75, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x49, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, + 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x70, 0x75, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x4c, 0x6f, + 0x63, 0x61, 0x6c, 0x54, 0x72, 0x75, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x46, 0x0a, 0x0b, 0x6d, + 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x25, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 
0x65, 0x74, + 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x12, 0x52, 0x0a, 0x0d, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6e, 0x65, 0x6f, + 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, + 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, + 0x88, 0x03, 0x0a, 0x21, 0x41, 0x6e, 0x6e, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, + 0x72, 0x65, 0x70, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x75, + 0x6e, 0x63, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, + 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, + 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x51, + 0x0a, 0x0d, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, + 0x03, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x2c, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x1a, 0x77, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, + 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, + 0x1c, 0x0a, 0x09, 0x69, 0x74, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x09, 0x69, 0x74, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, + 0x05, 0x74, 0x72, 0x75, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6e, + 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x70, 0x75, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x54, 0x6f, 0x50, 0x65, 0x65, 0x72, 0x54, 0x72, + 0x75, 0x73, 0x74, 0x52, 0x05, 0x74, 0x72, 0x75, 0x73, 0x74, 0x22, 0x9b, 0x02, 0x0a, 0x22, 0x41, + 0x6e, 0x6e, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6d, 0x65, 0x64, 0x69, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x51, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x3d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x70, 0x75, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x49, + 0x6e, 0x74, 0x65, 0x72, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, + 0x62, 0x6f, 0x64, 0x79, 0x12, 0x46, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x25, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, + 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x52, 0x0a, 0x0d, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, + 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x32, 0x9e, 0x02, 0x0a, 0x11, 0x52, 0x65, 0x70, + 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x77, + 0x0a, 0x12, 0x41, 0x6e, 0x6e, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x54, + 0x72, 0x75, 0x73, 0x74, 0x12, 0x2f, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x72, 0x65, 0x70, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, + 0x75, 0x6e, 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x54, 0x72, 0x75, 0x73, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x72, 0x65, 0x70, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x41, 0x6e, 0x6e, + 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x54, 0x72, 0x75, 0x73, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x8f, 0x01, 0x0a, 0x1a, 0x41, 0x6e, 0x6e, 0x6f, + 0x75, 0x6e, 0x63, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x37, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, + 
0x76, 0x32, 0x2e, 0x72, 0x65, 0x70, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x41, 0x6e, + 0x6e, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6d, 0x65, 0x64, 0x69, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x38, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x70, 0x75, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x49, + 0x6e, 0x74, 0x65, 0x72, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x62, 0x5a, 0x3f, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6e, 0x73, 0x70, 0x63, 0x63, 0x2d, 0x64, 0x65, + 0x76, 0x2f, 0x6e, 0x65, 0x6f, 0x66, 0x73, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x67, 0x6f, 0x2f, 0x76, + 0x32, 0x2f, 0x72, 0x65, 0x70, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, + 0x63, 0x3b, 0x72, 0x65, 0x70, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0xaa, 0x02, 0x1e, 0x4e, + 0x65, 0x6f, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x41, + 0x50, 0x49, 0x2e, 0x52, 0x65, 0x70, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_reputation_grpc_service_proto_rawDescOnce sync.Once + file_reputation_grpc_service_proto_rawDescData = file_reputation_grpc_service_proto_rawDesc +) + +func file_reputation_grpc_service_proto_rawDescGZIP() []byte { + file_reputation_grpc_service_proto_rawDescOnce.Do(func() { + file_reputation_grpc_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_reputation_grpc_service_proto_rawDescData) + }) + return file_reputation_grpc_service_proto_rawDescData +} + +var file_reputation_grpc_service_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_reputation_grpc_service_proto_goTypes = []interface{}{ + (*AnnounceLocalTrustRequest)(nil), // 0: 
neo.fs.v2.reputation.AnnounceLocalTrustRequest + (*AnnounceLocalTrustResponse)(nil), // 1: neo.fs.v2.reputation.AnnounceLocalTrustResponse + (*AnnounceIntermediateResultRequest)(nil), // 2: neo.fs.v2.reputation.AnnounceIntermediateResultRequest + (*AnnounceIntermediateResultResponse)(nil), // 3: neo.fs.v2.reputation.AnnounceIntermediateResultResponse + (*AnnounceLocalTrustRequest_Body)(nil), // 4: neo.fs.v2.reputation.AnnounceLocalTrustRequest.Body + (*AnnounceLocalTrustResponse_Body)(nil), // 5: neo.fs.v2.reputation.AnnounceLocalTrustResponse.Body + (*AnnounceIntermediateResultRequest_Body)(nil), // 6: neo.fs.v2.reputation.AnnounceIntermediateResultRequest.Body + (*AnnounceIntermediateResultResponse_Body)(nil), // 7: neo.fs.v2.reputation.AnnounceIntermediateResultResponse.Body + (*session.RequestMetaHeader)(nil), // 8: neo.fs.v2.session.RequestMetaHeader + (*session.RequestVerificationHeader)(nil), // 9: neo.fs.v2.session.RequestVerificationHeader + (*session.ResponseMetaHeader)(nil), // 10: neo.fs.v2.session.ResponseMetaHeader + (*session.ResponseVerificationHeader)(nil), // 11: neo.fs.v2.session.ResponseVerificationHeader + (*Trust)(nil), // 12: neo.fs.v2.reputation.Trust + (*PeerToPeerTrust)(nil), // 13: neo.fs.v2.reputation.PeerToPeerTrust +} +var file_reputation_grpc_service_proto_depIdxs = []int32{ + 4, // 0: neo.fs.v2.reputation.AnnounceLocalTrustRequest.body:type_name -> neo.fs.v2.reputation.AnnounceLocalTrustRequest.Body + 8, // 1: neo.fs.v2.reputation.AnnounceLocalTrustRequest.meta_header:type_name -> neo.fs.v2.session.RequestMetaHeader + 9, // 2: neo.fs.v2.reputation.AnnounceLocalTrustRequest.verify_header:type_name -> neo.fs.v2.session.RequestVerificationHeader + 5, // 3: neo.fs.v2.reputation.AnnounceLocalTrustResponse.body:type_name -> neo.fs.v2.reputation.AnnounceLocalTrustResponse.Body + 10, // 4: neo.fs.v2.reputation.AnnounceLocalTrustResponse.meta_header:type_name -> neo.fs.v2.session.ResponseMetaHeader + 11, // 5: 
neo.fs.v2.reputation.AnnounceLocalTrustResponse.verify_header:type_name -> neo.fs.v2.session.ResponseVerificationHeader + 6, // 6: neo.fs.v2.reputation.AnnounceIntermediateResultRequest.body:type_name -> neo.fs.v2.reputation.AnnounceIntermediateResultRequest.Body + 8, // 7: neo.fs.v2.reputation.AnnounceIntermediateResultRequest.meta_header:type_name -> neo.fs.v2.session.RequestMetaHeader + 9, // 8: neo.fs.v2.reputation.AnnounceIntermediateResultRequest.verify_header:type_name -> neo.fs.v2.session.RequestVerificationHeader + 7, // 9: neo.fs.v2.reputation.AnnounceIntermediateResultResponse.body:type_name -> neo.fs.v2.reputation.AnnounceIntermediateResultResponse.Body + 10, // 10: neo.fs.v2.reputation.AnnounceIntermediateResultResponse.meta_header:type_name -> neo.fs.v2.session.ResponseMetaHeader + 11, // 11: neo.fs.v2.reputation.AnnounceIntermediateResultResponse.verify_header:type_name -> neo.fs.v2.session.ResponseVerificationHeader + 12, // 12: neo.fs.v2.reputation.AnnounceLocalTrustRequest.Body.trusts:type_name -> neo.fs.v2.reputation.Trust + 13, // 13: neo.fs.v2.reputation.AnnounceIntermediateResultRequest.Body.trust:type_name -> neo.fs.v2.reputation.PeerToPeerTrust + 0, // 14: neo.fs.v2.reputation.ReputationService.AnnounceLocalTrust:input_type -> neo.fs.v2.reputation.AnnounceLocalTrustRequest + 2, // 15: neo.fs.v2.reputation.ReputationService.AnnounceIntermediateResult:input_type -> neo.fs.v2.reputation.AnnounceIntermediateResultRequest + 1, // 16: neo.fs.v2.reputation.ReputationService.AnnounceLocalTrust:output_type -> neo.fs.v2.reputation.AnnounceLocalTrustResponse + 3, // 17: neo.fs.v2.reputation.ReputationService.AnnounceIntermediateResult:output_type -> neo.fs.v2.reputation.AnnounceIntermediateResultResponse + 16, // [16:18] is the sub-list for method output_type + 14, // [14:16] is the sub-list for method input_type + 14, // [14:14] is the sub-list for extension type_name + 14, // [14:14] is the sub-list for extension extendee + 0, // [0:14] is the 
sub-list for field type_name +} + +func init() { file_reputation_grpc_service_proto_init() } +func file_reputation_grpc_service_proto_init() { + if File_reputation_grpc_service_proto != nil { + return + } + file_reputation_grpc_types_proto_init() + if !protoimpl.UnsafeEnabled { + file_reputation_grpc_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AnnounceLocalTrustRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_reputation_grpc_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AnnounceLocalTrustResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_reputation_grpc_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AnnounceIntermediateResultRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_reputation_grpc_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AnnounceIntermediateResultResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_reputation_grpc_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AnnounceLocalTrustRequest_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_reputation_grpc_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AnnounceLocalTrustResponse_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_reputation_grpc_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AnnounceIntermediateResultRequest_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_reputation_grpc_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AnnounceIntermediateResultResponse_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_reputation_grpc_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_reputation_grpc_service_proto_goTypes, + DependencyIndexes: file_reputation_grpc_service_proto_depIdxs, + MessageInfos: file_reputation_grpc_service_proto_msgTypes, + }.Build() + File_reputation_grpc_service_proto = out.File + file_reputation_grpc_service_proto_rawDesc = nil + file_reputation_grpc_service_proto_goTypes = nil + file_reputation_grpc_service_proto_depIdxs = nil +} diff --git a/api/reputation/service_grpc.pb.go b/api/reputation/service_grpc.pb.go new file mode 100644 index 000000000..61ac363e4 --- /dev/null +++ b/api/reputation/service_grpc.pb.go @@ -0,0 +1,170 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.25.1 +// source: reputation/grpc/service.proto + +package reputation + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. 
+const _ = grpc.SupportPackageIsVersion7 + +const ( + ReputationService_AnnounceLocalTrust_FullMethodName = "/neo.fs.v2.reputation.ReputationService/AnnounceLocalTrust" + ReputationService_AnnounceIntermediateResult_FullMethodName = "/neo.fs.v2.reputation.ReputationService/AnnounceIntermediateResult" +) + +// ReputationServiceClient is the client API for ReputationService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ReputationServiceClient interface { + // Announce local client trust information to any node in NeoFS network. + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): + // local trust has been successfully announced; + // - Common failures (SECTION_FAILURE_COMMON). + AnnounceLocalTrust(ctx context.Context, in *AnnounceLocalTrustRequest, opts ...grpc.CallOption) (*AnnounceLocalTrustResponse, error) + // Announce the intermediate result of the iterative algorithm for + // calculating the global reputation of the node in NeoFS network. + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): + // intermediate trust estimation has been successfully announced; + // - Common failures (SECTION_FAILURE_COMMON). + AnnounceIntermediateResult(ctx context.Context, in *AnnounceIntermediateResultRequest, opts ...grpc.CallOption) (*AnnounceIntermediateResultResponse, error) +} + +type reputationServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewReputationServiceClient(cc grpc.ClientConnInterface) ReputationServiceClient { + return &reputationServiceClient{cc} +} + +func (c *reputationServiceClient) AnnounceLocalTrust(ctx context.Context, in *AnnounceLocalTrustRequest, opts ...grpc.CallOption) (*AnnounceLocalTrustResponse, error) { + out := new(AnnounceLocalTrustResponse) + err := c.cc.Invoke(ctx, ReputationService_AnnounceLocalTrust_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *reputationServiceClient) AnnounceIntermediateResult(ctx context.Context, in *AnnounceIntermediateResultRequest, opts ...grpc.CallOption) (*AnnounceIntermediateResultResponse, error) { + out := new(AnnounceIntermediateResultResponse) + err := c.cc.Invoke(ctx, ReputationService_AnnounceIntermediateResult_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ReputationServiceServer is the server API for ReputationService service. +// All implementations should embed UnimplementedReputationServiceServer +// for forward compatibility +type ReputationServiceServer interface { + // Announce local client trust information to any node in NeoFS network. + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): + // local trust has been successfully announced; + // - Common failures (SECTION_FAILURE_COMMON). + AnnounceLocalTrust(context.Context, *AnnounceLocalTrustRequest) (*AnnounceLocalTrustResponse, error) + // Announce the intermediate result of the iterative algorithm for + // calculating the global reputation of the node in NeoFS network. + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): + // intermediate trust estimation has been successfully announced; + // - Common failures (SECTION_FAILURE_COMMON). + AnnounceIntermediateResult(context.Context, *AnnounceIntermediateResultRequest) (*AnnounceIntermediateResultResponse, error) +} + +// UnimplementedReputationServiceServer should be embedded to have forward compatible implementations. 
+type UnimplementedReputationServiceServer struct { +} + +func (UnimplementedReputationServiceServer) AnnounceLocalTrust(context.Context, *AnnounceLocalTrustRequest) (*AnnounceLocalTrustResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AnnounceLocalTrust not implemented") +} +func (UnimplementedReputationServiceServer) AnnounceIntermediateResult(context.Context, *AnnounceIntermediateResultRequest) (*AnnounceIntermediateResultResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AnnounceIntermediateResult not implemented") +} + +// UnsafeReputationServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ReputationServiceServer will +// result in compilation errors. +type UnsafeReputationServiceServer interface { + mustEmbedUnimplementedReputationServiceServer() +} + +func RegisterReputationServiceServer(s grpc.ServiceRegistrar, srv ReputationServiceServer) { + s.RegisterService(&ReputationService_ServiceDesc, srv) +} + +func _ReputationService_AnnounceLocalTrust_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AnnounceLocalTrustRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ReputationServiceServer).AnnounceLocalTrust(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ReputationService_AnnounceLocalTrust_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ReputationServiceServer).AnnounceLocalTrust(ctx, req.(*AnnounceLocalTrustRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ReputationService_AnnounceIntermediateResult_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(AnnounceIntermediateResultRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ReputationServiceServer).AnnounceIntermediateResult(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ReputationService_AnnounceIntermediateResult_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ReputationServiceServer).AnnounceIntermediateResult(ctx, req.(*AnnounceIntermediateResultRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// ReputationService_ServiceDesc is the grpc.ServiceDesc for ReputationService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ReputationService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "neo.fs.v2.reputation.ReputationService", + HandlerType: (*ReputationServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "AnnounceLocalTrust", + Handler: _ReputationService_AnnounceLocalTrust_Handler, + }, + { + MethodName: "AnnounceIntermediateResult", + Handler: _ReputationService_AnnounceIntermediateResult_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "reputation/grpc/service.proto", +} diff --git a/api/reputation/types.pb.go b/api/reputation/types.pb.go new file mode 100644 index 000000000..56f099028 --- /dev/null +++ b/api/reputation/types.pb.go @@ -0,0 +1,501 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.32.0 +// protoc v4.25.1 +// source: reputation/grpc/types.proto + +package reputation + +import ( + reflect "reflect" + sync "sync" + + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// NeoFS unique peer identifier is a 33 byte long compressed public key of the +// node, the same as the one stored in the network map. +// +// String presentation is a +// [base58](https://tools.ietf.org/html/draft-msporny-base58-02) encoded string. +// +// JSON value will be data encoded as a string using standard base64 +// encoding with paddings. Either +// [standard](https://tools.ietf.org/html/rfc4648#section-4) or +// [URL-safe](https://tools.ietf.org/html/rfc4648#section-5) base64 encoding +// with/without paddings are accepted. +type PeerID struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Peer node's public key + PublicKey []byte `protobuf:"bytes,1,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` +} + +func (x *PeerID) Reset() { + *x = PeerID{} + if protoimpl.UnsafeEnabled { + mi := &file_reputation_grpc_types_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeerID) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeerID) ProtoMessage() {} + +func (x *PeerID) ProtoReflect() protoreflect.Message { + mi := &file_reputation_grpc_types_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeerID.ProtoReflect.Descriptor instead. +func (*PeerID) Descriptor() ([]byte, []int) { + return file_reputation_grpc_types_proto_rawDescGZIP(), []int{0} +} + +func (x *PeerID) GetPublicKey() []byte { + if x != nil { + return x.PublicKey + } + return nil +} + +// Trust level to a NeoFS network peer. 
+type Trust struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifier of the trusted peer + Peer *PeerID `protobuf:"bytes,1,opt,name=peer,proto3" json:"peer,omitempty"` + // Trust level in [0:1] range + Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *Trust) Reset() { + *x = Trust{} + if protoimpl.UnsafeEnabled { + mi := &file_reputation_grpc_types_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Trust) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Trust) ProtoMessage() {} + +func (x *Trust) ProtoReflect() protoreflect.Message { + mi := &file_reputation_grpc_types_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Trust.ProtoReflect.Descriptor instead. +func (*Trust) Descriptor() ([]byte, []int) { + return file_reputation_grpc_types_proto_rawDescGZIP(), []int{1} +} + +func (x *Trust) GetPeer() *PeerID { + if x != nil { + return x.Peer + } + return nil +} + +func (x *Trust) GetValue() float64 { + if x != nil { + return x.Value + } + return 0 +} + +// Trust level of a peer to a peer. 
+type PeerToPeerTrust struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifier of the trusting peer + TrustingPeer *PeerID `protobuf:"bytes,1,opt,name=trusting_peer,json=trustingPeer,proto3" json:"trusting_peer,omitempty"` + // Trust level + Trust *Trust `protobuf:"bytes,2,opt,name=trust,proto3" json:"trust,omitempty"` +} + +func (x *PeerToPeerTrust) Reset() { + *x = PeerToPeerTrust{} + if protoimpl.UnsafeEnabled { + mi := &file_reputation_grpc_types_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeerToPeerTrust) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeerToPeerTrust) ProtoMessage() {} + +func (x *PeerToPeerTrust) ProtoReflect() protoreflect.Message { + mi := &file_reputation_grpc_types_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeerToPeerTrust.ProtoReflect.Descriptor instead. +func (*PeerToPeerTrust) Descriptor() ([]byte, []int) { + return file_reputation_grpc_types_proto_rawDescGZIP(), []int{2} +} + +func (x *PeerToPeerTrust) GetTrustingPeer() *PeerID { + if x != nil { + return x.TrustingPeer + } + return nil +} + +func (x *PeerToPeerTrust) GetTrust() *Trust { + if x != nil { + return x.Trust + } + return nil +} + +// Global trust level to NeoFS node. +type GlobalTrust struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Message format version. Effectively, the version of API library used to create + // the message. 
+ Version *refs.Version `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Message body + Body *GlobalTrust_Body `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"` + // Signature of the binary `body` field by the manager. + Signature *refs.Signature `protobuf:"bytes,3,opt,name=signature,proto3" json:"signature,omitempty"` +} + +func (x *GlobalTrust) Reset() { + *x = GlobalTrust{} + if protoimpl.UnsafeEnabled { + mi := &file_reputation_grpc_types_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GlobalTrust) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GlobalTrust) ProtoMessage() {} + +func (x *GlobalTrust) ProtoReflect() protoreflect.Message { + mi := &file_reputation_grpc_types_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GlobalTrust.ProtoReflect.Descriptor instead. +func (*GlobalTrust) Descriptor() ([]byte, []int) { + return file_reputation_grpc_types_proto_rawDescGZIP(), []int{3} +} + +func (x *GlobalTrust) GetVersion() *refs.Version { + if x != nil { + return x.Version + } + return nil +} + +func (x *GlobalTrust) GetBody() *GlobalTrust_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *GlobalTrust) GetSignature() *refs.Signature { + if x != nil { + return x.Signature + } + return nil +} + +// Message body structure. 
+type GlobalTrust_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Node manager ID + Manager *PeerID `protobuf:"bytes,1,opt,name=manager,proto3" json:"manager,omitempty"` + // Global trust level + Trust *Trust `protobuf:"bytes,2,opt,name=trust,proto3" json:"trust,omitempty"` +} + +func (x *GlobalTrust_Body) Reset() { + *x = GlobalTrust_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_reputation_grpc_types_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GlobalTrust_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GlobalTrust_Body) ProtoMessage() {} + +func (x *GlobalTrust_Body) ProtoReflect() protoreflect.Message { + mi := &file_reputation_grpc_types_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GlobalTrust_Body.ProtoReflect.Descriptor instead. 
+func (*GlobalTrust_Body) Descriptor() ([]byte, []int) { + return file_reputation_grpc_types_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *GlobalTrust_Body) GetManager() *PeerID { + if x != nil { + return x.Manager + } + return nil +} + +func (x *GlobalTrust_Body) GetTrust() *Trust { + if x != nil { + return x.Trust + } + return nil +} + +var File_reputation_grpc_types_proto protoreflect.FileDescriptor + +var file_reputation_grpc_types_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x72, 0x65, 0x70, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, + 0x63, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x6e, + 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x70, 0x75, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x1a, 0x15, 0x72, 0x65, 0x66, 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x27, 0x0a, 0x06, 0x50, 0x65, + 0x65, 0x72, 0x49, 0x44, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, + 0x4b, 0x65, 0x79, 0x22, 0x4f, 0x0a, 0x05, 0x54, 0x72, 0x75, 0x73, 0x74, 0x12, 0x30, 0x0a, 0x04, + 0x70, 0x65, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6e, 0x65, 0x6f, + 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x70, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x49, 0x44, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x22, 0x87, 0x01, 0x0a, 0x0f, 0x50, 0x65, 0x65, 0x72, 0x54, 0x6f, 0x50, + 0x65, 0x65, 0x72, 0x54, 0x72, 0x75, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x0d, 0x74, 0x72, 0x75, 0x73, + 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 
0x32, 0x2e, 0x72, 0x65, 0x70, 0x75, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x49, 0x44, 0x52, 0x0c, 0x74, + 0x72, 0x75, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x65, 0x65, 0x72, 0x12, 0x31, 0x0a, 0x05, 0x74, + 0x72, 0x75, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6e, 0x65, 0x6f, + 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x70, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x54, 0x72, 0x75, 0x73, 0x74, 0x52, 0x05, 0x74, 0x72, 0x75, 0x73, 0x74, 0x22, 0xa8, + 0x02, 0x0a, 0x0b, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x54, 0x72, 0x75, 0x73, 0x74, 0x12, 0x31, + 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, + 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x3a, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x70, 0x75, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x54, 0x72, 0x75, + 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x37, 0x0a, + 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, + 0x73, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, + 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x71, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x36, + 0x0a, 0x07, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x70, 0x75, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x49, 0x44, 0x52, 0x07, 0x6d, + 
0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x31, 0x0a, 0x05, 0x74, 0x72, 0x75, 0x73, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x72, 0x65, 0x70, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x72, 0x75, + 0x73, 0x74, 0x52, 0x05, 0x74, 0x72, 0x75, 0x73, 0x74, 0x42, 0x62, 0x5a, 0x3f, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6e, 0x73, 0x70, 0x63, 0x63, 0x2d, 0x64, 0x65, + 0x76, 0x2f, 0x6e, 0x65, 0x6f, 0x66, 0x73, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x67, 0x6f, 0x2f, 0x76, + 0x32, 0x2f, 0x72, 0x65, 0x70, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, + 0x63, 0x3b, 0x72, 0x65, 0x70, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0xaa, 0x02, 0x1e, 0x4e, + 0x65, 0x6f, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x41, + 0x50, 0x49, 0x2e, 0x52, 0x65, 0x70, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_reputation_grpc_types_proto_rawDescOnce sync.Once + file_reputation_grpc_types_proto_rawDescData = file_reputation_grpc_types_proto_rawDesc +) + +func file_reputation_grpc_types_proto_rawDescGZIP() []byte { + file_reputation_grpc_types_proto_rawDescOnce.Do(func() { + file_reputation_grpc_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_reputation_grpc_types_proto_rawDescData) + }) + return file_reputation_grpc_types_proto_rawDescData +} + +var file_reputation_grpc_types_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_reputation_grpc_types_proto_goTypes = []interface{}{ + (*PeerID)(nil), // 0: neo.fs.v2.reputation.PeerID + (*Trust)(nil), // 1: neo.fs.v2.reputation.Trust + (*PeerToPeerTrust)(nil), // 2: neo.fs.v2.reputation.PeerToPeerTrust + (*GlobalTrust)(nil), // 3: neo.fs.v2.reputation.GlobalTrust + (*GlobalTrust_Body)(nil), // 4: neo.fs.v2.reputation.GlobalTrust.Body + (*refs.Version)(nil), // 5: neo.fs.v2.refs.Version + 
(*refs.Signature)(nil), // 6: neo.fs.v2.refs.Signature +} +var file_reputation_grpc_types_proto_depIdxs = []int32{ + 0, // 0: neo.fs.v2.reputation.Trust.peer:type_name -> neo.fs.v2.reputation.PeerID + 0, // 1: neo.fs.v2.reputation.PeerToPeerTrust.trusting_peer:type_name -> neo.fs.v2.reputation.PeerID + 1, // 2: neo.fs.v2.reputation.PeerToPeerTrust.trust:type_name -> neo.fs.v2.reputation.Trust + 5, // 3: neo.fs.v2.reputation.GlobalTrust.version:type_name -> neo.fs.v2.refs.Version + 4, // 4: neo.fs.v2.reputation.GlobalTrust.body:type_name -> neo.fs.v2.reputation.GlobalTrust.Body + 6, // 5: neo.fs.v2.reputation.GlobalTrust.signature:type_name -> neo.fs.v2.refs.Signature + 0, // 6: neo.fs.v2.reputation.GlobalTrust.Body.manager:type_name -> neo.fs.v2.reputation.PeerID + 1, // 7: neo.fs.v2.reputation.GlobalTrust.Body.trust:type_name -> neo.fs.v2.reputation.Trust + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name +} + +func init() { file_reputation_grpc_types_proto_init() } +func file_reputation_grpc_types_proto_init() { + if File_reputation_grpc_types_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_reputation_grpc_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeerID); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_reputation_grpc_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Trust); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_reputation_grpc_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeerToPeerTrust); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_reputation_grpc_types_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GlobalTrust); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_reputation_grpc_types_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GlobalTrust_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_reputation_grpc_types_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_reputation_grpc_types_proto_goTypes, + DependencyIndexes: file_reputation_grpc_types_proto_depIdxs, + MessageInfos: file_reputation_grpc_types_proto_msgTypes, + }.Build() + File_reputation_grpc_types_proto = out.File + file_reputation_grpc_types_proto_rawDesc = nil + file_reputation_grpc_types_proto_goTypes = nil + file_reputation_grpc_types_proto_depIdxs = nil +} diff --git a/api/session/encoding.go b/api/session/encoding.go new file mode 100644 index 000000000..6c6459d0e --- /dev/null +++ b/api/session/encoding.go @@ -0,0 +1,380 @@ +package session + +import ( + "fmt" + + "github.com/nspcc-dev/neofs-sdk-go/internal/proto" +) + +const ( + _ = iota + fieldTokenObjectTargetContainer + fieldTokenObjectTargetIDs +) + +func (x *ObjectSessionContext_Target) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldTokenObjectTargetContainer, x.Container) + for i := range x.Objects { + sz += proto.SizeNested(fieldTokenObjectTargetIDs, x.Objects[i]) + } + } + return sz +} + +func (x *ObjectSessionContext_Target) MarshalStable(b []byte) { 
+ if x != nil { + off := proto.MarshalNested(b, fieldTokenObjectTargetContainer, x.Container) + for i := range x.Objects { + off += proto.MarshalNested(b[off:], fieldTokenObjectTargetIDs, x.Objects[i]) + } + } +} + +const ( + _ = iota + fieldTokenObjectVerb + fieldTokenObjectTarget +) + +func (x *ObjectSessionContext) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeVarint(fieldTokenObjectVerb, int32(x.Verb)) + + proto.SizeNested(fieldTokenObjectTarget, x.Target) + } + return sz +} + +func (x *ObjectSessionContext) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalVarint(b, fieldTokenObjectVerb, int32(x.Verb)) + proto.MarshalNested(b[off:], fieldTokenObjectTarget, x.Target) + } +} + +const ( + _ = iota + fieldTokenContainerVerb + fieldTokenContainerWildcard + fieldTokenContainerID +) + +func (x *ContainerSessionContext) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeVarint(fieldTokenContainerVerb, int32(x.Verb)) + + proto.SizeBool(fieldTokenContainerWildcard, x.Wildcard) + + proto.SizeNested(fieldTokenContainerID, x.ContainerId) + } + return sz +} + +func (x *ContainerSessionContext) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalVarint(b, fieldTokenContainerVerb, int32(x.Verb)) + off += proto.MarshalBool(b[off:], fieldTokenContainerWildcard, x.Wildcard) + proto.MarshalNested(b[off:], fieldTokenContainerID, x.ContainerId) + } +} + +const ( + _ = iota + fieldTokenExp + fieldTokenNbf + fieldTokenIat +) + +func (x *SessionToken_Body_TokenLifetime) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeVarint(fieldTokenExp, x.Exp) + + proto.SizeVarint(fieldTokenNbf, x.Nbf) + + proto.SizeVarint(fieldTokenIat, x.Iat) + + } + return sz +} + +func (x *SessionToken_Body_TokenLifetime) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalVarint(b, fieldTokenExp, x.Exp) + off += proto.MarshalVarint(b[off:], fieldTokenNbf, x.Nbf) + proto.MarshalVarint(b[off:], fieldTokenIat, x.Iat) + 
} +} + +const ( + _ = iota + fieldTokenID + fieldTokenOwner + fieldTokenLifetime + fieldTokenSessionKey + fieldTokenContextObject + fieldTokenContextContainer +) + +func (x *SessionToken_Body) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeBytes(fieldTokenID, x.Id) + + proto.SizeNested(fieldTokenOwner, x.OwnerId) + + proto.SizeNested(fieldTokenLifetime, x.Lifetime) + + proto.SizeBytes(fieldTokenSessionKey, x.SessionKey) + switch c := x.Context.(type) { + default: + panic(fmt.Sprintf("unexpected context %T", x.Context)) + case nil: + case *SessionToken_Body_Object: + sz += proto.SizeNested(fieldTokenContextObject, c.Object) + case *SessionToken_Body_Container: + sz += proto.SizeNested(fieldTokenContextContainer, c.Container) + } + } + return sz +} + +func (x *SessionToken_Body) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalBytes(b, fieldTokenID, x.Id) + off += proto.MarshalNested(b[off:], fieldTokenOwner, x.OwnerId) + off += proto.MarshalNested(b[off:], fieldTokenLifetime, x.Lifetime) + off += proto.MarshalBytes(b[off:], fieldTokenSessionKey, x.SessionKey) + switch c := x.Context.(type) { + default: + panic(fmt.Sprintf("unexpected context %T", x.Context)) + case nil: + case *SessionToken_Body_Object: + proto.MarshalNested(b[off:], fieldTokenContextObject, c.Object) + case *SessionToken_Body_Container: + proto.MarshalNested(b[off:], fieldTokenContextContainer, c.Container) + } + } +} + +const ( + _ = iota + fieldTokenBody + fieldTokenSignature +) + +func (x *SessionToken) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldTokenBody, x.Body) + + proto.SizeNested(fieldTokenSignature, x.Signature) + } + return sz +} + +func (x *SessionToken) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalNested(b, fieldTokenBody, x.Body) + proto.MarshalNested(b[off:], fieldTokenSignature, x.Signature) + } +} + +const ( + _ = iota + fieldCreateReqUser + fieldCreateReqExp +) + +func (x 
*CreateRequest_Body) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldCreateReqUser, x.OwnerId) + + proto.SizeVarint(fieldCreateReqExp, x.Expiration) + } + return sz +} + +func (x *CreateRequest_Body) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalNested(b, fieldCreateReqUser, x.OwnerId) + proto.MarshalVarint(b[off:], fieldCreateReqExp, x.Expiration) + } +} + +const ( + _ = iota + fieldCreateRespID + fieldCreateRespSessionKey +) + +func (x *CreateResponse_Body) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeBytes(fieldCreateRespID, x.Id) + + proto.SizeBytes(fieldCreateRespSessionKey, x.SessionKey) + } + return sz +} + +func (x *CreateResponse_Body) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalBytes(b, fieldCreateRespID, x.Id) + proto.MarshalBytes(b[off:], fieldCreateRespSessionKey, x.SessionKey) + } +} + +const ( + _ = iota + fieldXHeaderKey + fieldXHeaderValue +) + +func (x *XHeader) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeBytes(fieldXHeaderKey, x.Key) + + proto.SizeBytes(fieldXHeaderValue, x.Value) + } + return sz +} + +func (x *XHeader) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalBytes(b, fieldXHeaderKey, x.Key) + proto.MarshalBytes(b[off:], fieldXHeaderValue, x.Value) + } +} + +const ( + _ = iota + fieldReqMetaVersion + fieldReqMetaEpoch + fieldReqMetaTTL + fieldReqMetaXHeaders + fieldReqMetaSession + fieldReqMetaBearer + fieldReqMetaOrigin + fieldReqMetaNetMagic +) + +func (x *RequestMetaHeader) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldReqMetaVersion, x.Version) + + proto.SizeVarint(fieldReqMetaEpoch, x.Epoch) + + proto.SizeVarint(fieldReqMetaTTL, x.Ttl) + + proto.SizeNested(fieldReqMetaSession, x.SessionToken) + + proto.SizeNested(fieldReqMetaBearer, x.BearerToken) + + proto.SizeNested(fieldReqMetaOrigin, x.Origin) + + proto.SizeVarint(fieldReqMetaNetMagic, x.MagicNumber) + for i := 
range x.XHeaders { + sz += proto.SizeNested(fieldReqMetaXHeaders, x.XHeaders[i]) + } + } + return sz +} + +func (x *RequestMetaHeader) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalNested(b, fieldReqMetaVersion, x.Version) + off += proto.MarshalVarint(b[off:], fieldReqMetaEpoch, x.Epoch) + off += proto.MarshalVarint(b[off:], fieldReqMetaTTL, x.Ttl) + for i := range x.XHeaders { + off += proto.MarshalNested(b[off:], fieldReqMetaXHeaders, x.XHeaders[i]) + } + off += proto.MarshalNested(b[off:], fieldReqMetaSession, x.SessionToken) + off += proto.MarshalNested(b[off:], fieldReqMetaBearer, x.BearerToken) + off += proto.MarshalNested(b[off:], fieldReqMetaOrigin, x.Origin) + off += proto.MarshalVarint(b[off:], fieldReqMetaNetMagic, x.MagicNumber) + } +} + +const ( + _ = iota + fieldRespMetaVersion + fieldRespMetaEpoch + fieldRespMetaTTL + fieldRespMetaXHeaders + fieldRespMetaOrigin + fieldRespMetaStatus +) + +func (x *ResponseMetaHeader) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldRespMetaVersion, x.Version) + + proto.SizeVarint(fieldRespMetaEpoch, x.Epoch) + + proto.SizeVarint(fieldRespMetaTTL, x.Ttl) + + proto.SizeNested(fieldRespMetaOrigin, x.Origin) + + proto.SizeNested(fieldRespMetaStatus, x.Status) + for i := range x.XHeaders { + sz += proto.SizeNested(fieldRespMetaXHeaders, x.XHeaders[i]) + } + } + return sz +} + +func (x *ResponseMetaHeader) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalNested(b, fieldRespMetaVersion, x.Version) + off += proto.MarshalVarint(b[off:], fieldRespMetaEpoch, x.Epoch) + off += proto.MarshalVarint(b[off:], fieldRespMetaTTL, x.Ttl) + for i := range x.XHeaders { + off += proto.MarshalNested(b[off:], fieldRespMetaXHeaders, x.XHeaders[i]) + } + off += proto.MarshalNested(b[off:], fieldRespMetaOrigin, x.Origin) + off += proto.MarshalNested(b[off:], fieldRespMetaStatus, x.Status) + } +} + +const ( + _ = iota + fieldReqVerifyBodySignature + fieldReqVerifyMetaSignature + 
fieldReqVerifyOriginSignature + fieldReqVerifyOrigin +) + +func (x *RequestVerificationHeader) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldReqVerifyBodySignature, x.BodySignature) + + proto.SizeNested(fieldReqVerifyMetaSignature, x.MetaSignature) + + proto.SizeNested(fieldReqVerifyOriginSignature, x.OriginSignature) + + proto.SizeNested(fieldReqVerifyOrigin, x.Origin) + } + return sz +} + +func (x *RequestVerificationHeader) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalNested(b, fieldReqVerifyBodySignature, x.BodySignature) + off += proto.MarshalNested(b[off:], fieldReqVerifyMetaSignature, x.MetaSignature) + off += proto.MarshalNested(b[off:], fieldReqVerifyOriginSignature, x.OriginSignature) + proto.MarshalNested(b[off:], fieldReqVerifyOrigin, x.Origin) + } +} + +const ( + _ = iota + fieldRespVerifyBodySignature + fieldRespVerifyMetaSignature + fieldRespVerifyOriginSignature + fieldRespVerifyOrigin +) + +func (x *ResponseVerificationHeader) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeNested(fieldRespVerifyBodySignature, x.BodySignature) + + proto.SizeNested(fieldRespVerifyMetaSignature, x.MetaSignature) + + proto.SizeNested(fieldRespVerifyOriginSignature, x.OriginSignature) + + proto.SizeNested(fieldRespVerifyOrigin, x.Origin) + } + return sz +} + +func (x *ResponseVerificationHeader) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalNested(b, fieldRespVerifyBodySignature, x.BodySignature) + off += proto.MarshalNested(b[off:], fieldRespVerifyMetaSignature, x.MetaSignature) + off += proto.MarshalNested(b[off:], fieldRespVerifyOriginSignature, x.OriginSignature) + proto.MarshalNested(b[off:], fieldRespVerifyOrigin, x.Origin) + } +} diff --git a/api/session/encoding_test.go b/api/session/encoding_test.go new file mode 100644 index 000000000..55bf2ffc1 --- /dev/null +++ b/api/session/encoding_test.go @@ -0,0 +1,318 @@ +package session_test + +import ( + "testing" + + 
"github.com/nspcc-dev/neofs-sdk-go/api/acl" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/session" + "github.com/nspcc-dev/neofs-sdk-go/api/status" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" +) + +func TestSessionToken_Body(t *testing.T) { + v := &session.SessionToken{ + Body: &session.SessionToken_Body{ + Id: []byte("any_ID"), + OwnerId: &refs.OwnerID{Value: []byte("any_owner")}, + Lifetime: &session.SessionToken_Body_TokenLifetime{Exp: 1, Nbf: 2, Iat: 3}, + SessionKey: []byte("any_key"), + }, + Signature: &refs.Signature{Key: []byte("any_key"), Sign: []byte("any_signature"), Scheme: 4}, + } + + testWithContext := func(setCtx func(*session.SessionToken_Body)) { + setCtx(v.Body) + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res session.SessionToken + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.Body, res.Body) + require.Equal(t, v.Signature, res.Signature) + } + + testWithContext(func(body *session.SessionToken_Body) { body.Context = nil }) + testWithContext(func(body *session.SessionToken_Body) { + body.Context = &session.SessionToken_Body_Container{ + Container: &session.ContainerSessionContext{ + Verb: 1, Wildcard: true, + ContainerId: &refs.ContainerID{Value: []byte("any_container")}, + }, + } + }) + testWithContext(func(body *session.SessionToken_Body) { + body.Context = &session.SessionToken_Body_Object{ + Object: &session.ObjectSessionContext{ + Verb: 1, + Target: &session.ObjectSessionContext_Target{ + Container: &refs.ContainerID{Value: []byte("any_container")}, + Objects: []*refs.ObjectID{ + {Value: []byte("any_object1")}, + {Value: []byte("any_object2")}, + }, + }, + }, + } + }) +} + +func TestCreateRequest_Body(t *testing.T) { + v := &session.CreateRequest_Body{ + OwnerId: &refs.OwnerID{Value: []byte("any_owner")}, + Expiration: 1, + } + + sz := 
v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res session.CreateRequest_Body + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.OwnerId, res.OwnerId) + require.Equal(t, v.Expiration, res.Expiration) +} + +func TestCreateResponse_Body(t *testing.T) { + v := &session.CreateResponse_Body{ + Id: []byte("any_ID"), + SessionKey: []byte("any_public_key"), + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res session.CreateResponse_Body + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.Id, res.Id) + require.Equal(t, v.SessionKey, res.SessionKey) +} + +func TestRequestMetaHeader(t *testing.T) { + v := &session.RequestMetaHeader{ + Version: &refs.Version{Major: 1, Minor: 2}, + Epoch: 3, + Ttl: 4, + XHeaders: []*session.XHeader{ + {Key: "any_key1", Value: "any_val1"}, + {Key: "any_key2", Value: "any_val2"}, + }, + SessionToken: &session.SessionToken{ + Body: &session.SessionToken_Body{ + Id: []byte("any_ID"), + OwnerId: &refs.OwnerID{Value: []byte("any_owner")}, + Lifetime: &session.SessionToken_Body_TokenLifetime{Exp: 101, Nbf: 102, Iat: 103}, + SessionKey: []byte("any_key"), + }, + Signature: &refs.Signature{Key: []byte("any_key"), Sign: []byte("any_signature"), Scheme: 104}, + }, + BearerToken: &acl.BearerToken{ + Body: &acl.BearerToken_Body{ + EaclTable: &acl.EACLTable{ + Version: &refs.Version{Major: 200, Minor: 201}, + ContainerId: &refs.ContainerID{Value: []byte("any_container")}, + Records: []*acl.EACLRecord{ + { + Operation: 203, Action: 204, + Filters: []*acl.EACLRecord_Filter{ + {HeaderType: 205, MatchType: 206, Key: "key1", Value: "val1"}, + {HeaderType: 207, MatchType: 208, Key: "key2", Value: "val2"}, + }, + Targets: []*acl.EACLRecord_Target{ + {Role: 209, Keys: [][]byte{{0}, {1}}}, + {Role: 210, Keys: [][]byte{{2}, {3}}}, + }, + }, + { + 
Operation: 211, Action: 212, + Filters: []*acl.EACLRecord_Filter{ + {HeaderType: 213, MatchType: 12, Key: "key3", Value: "val3"}, + {HeaderType: 214, MatchType: 14, Key: "key4", Value: "val4"}, + }, + Targets: []*acl.EACLRecord_Target{ + {Role: 215, Keys: [][]byte{{4}, {5}}}, + {Role: 216, Keys: [][]byte{{6}, {7}}}, + }, + }, + }, + }, + OwnerId: &refs.OwnerID{Value: []byte("any_owner")}, + Lifetime: &acl.BearerToken_Body_TokenLifetime{Exp: 217, Nbf: 218, Iat: 219}, + Issuer: &refs.OwnerID{Value: []byte("any_issuer")}, + }, + Signature: &refs.Signature{ + Key: []byte("any_public_key"), + Sign: []byte("any_signature"), + Scheme: 1, + }, + }, + Origin: &session.RequestMetaHeader{ + Version: &refs.Version{Major: 300, Minor: 301}, + Epoch: 302, + Ttl: 303, + XHeaders: []*session.XHeader{ + {Key: "any_key3", Value: "any_val3"}, + {Key: "any_key4", Value: "any_val4"}, + }, + MagicNumber: 304, + }, + MagicNumber: 5, + } + + testWithSessionContext := func(setCtx func(*session.SessionToken_Body)) { + setCtx(v.SessionToken.Body) + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res session.RequestMetaHeader + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.Version, res.Version) + require.Equal(t, v.Epoch, res.Epoch) + require.Equal(t, v.Ttl, res.Ttl) + require.Equal(t, v.XHeaders, res.XHeaders) + require.Equal(t, v.SessionToken, res.SessionToken) + require.Equal(t, v.BearerToken, res.BearerToken) + require.Equal(t, v.Origin, res.Origin) + require.Equal(t, v.MagicNumber, res.MagicNumber) + } + + testWithSessionContext(func(body *session.SessionToken_Body) { body.Context = nil }) + testWithSessionContext(func(body *session.SessionToken_Body) { + body.Context = &session.SessionToken_Body_Container{ + Container: &session.ContainerSessionContext{ + Verb: 1, Wildcard: true, + ContainerId: &refs.ContainerID{Value: []byte("any_container")}, + }, + } + }) + 
testWithSessionContext(func(body *session.SessionToken_Body) { + body.Context = &session.SessionToken_Body_Object{ + Object: &session.ObjectSessionContext{ + Verb: 1, + Target: &session.ObjectSessionContext_Target{ + Container: &refs.ContainerID{Value: []byte("any_container")}, + Objects: []*refs.ObjectID{ + {Value: []byte("any_object1")}, + {Value: []byte("any_object2")}, + }, + }, + }, + } + }) +} + +func TestResponseMetaHeader(t *testing.T) { + v := &session.ResponseMetaHeader{ + Version: &refs.Version{Major: 1, Minor: 2}, + Epoch: 3, + Ttl: 4, + XHeaders: []*session.XHeader{ + {Key: "any_key1", Value: "any_val1"}, + {Key: "any_key2", Value: "any_val2"}, + }, + Origin: &session.ResponseMetaHeader{ + Version: &refs.Version{Major: 100, Minor: 101}, + Epoch: 102, + Ttl: 103, + XHeaders: []*session.XHeader{ + {Key: "any_key3", Value: "any_val3"}, + {Key: "any_key4", Value: "any_val4"}, + }, + Status: &status.Status{ + Code: 104, + Message: "any_message", + Details: []*status.Status_Detail{ + {Id: 105, Value: []byte("any_detail100")}, + {Id: 106, Value: []byte("any_detail101")}, + }, + }, + }, + Status: &status.Status{ + Code: 5, + Message: "any_message", + Details: []*status.Status_Detail{ + {Id: 6, Value: []byte("any_detail1")}, + {Id: 7, Value: []byte("any_detail2")}, + }, + }, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res session.ResponseMetaHeader + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.Version, res.Version) + require.Equal(t, v.Epoch, res.Epoch) + require.Equal(t, v.Ttl, res.Ttl) + require.Equal(t, v.XHeaders, res.XHeaders) + require.Equal(t, v.Origin, res.Origin) + require.Equal(t, v.Status, res.Status) +} + +func TestRequestVerificationHeader(t *testing.T) { + v := &session.RequestVerificationHeader{ + BodySignature: &refs.Signature{Key: []byte("any_pubkey1"), Sign: []byte("any_signature1"), Scheme: 1}, + MetaSignature: 
&refs.Signature{Key: []byte("any_pubkey2"), Sign: []byte("any_signature2"), Scheme: 2}, + OriginSignature: &refs.Signature{Key: []byte("any_pubkey3"), Sign: []byte("any_signature3"), Scheme: 3}, + Origin: &session.RequestVerificationHeader{ + BodySignature: &refs.Signature{Key: []byte("any_pubkey100"), Sign: []byte("any_signature100"), Scheme: 100}, + MetaSignature: &refs.Signature{Key: []byte("any_pubkey101"), Sign: []byte("any_signature101"), Scheme: 101}, + OriginSignature: &refs.Signature{Key: []byte("any_pubkey102"), Sign: []byte("any_signature102"), Scheme: 102}, + }, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res session.RequestVerificationHeader + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.BodySignature, res.BodySignature) + require.Equal(t, v.MetaSignature, res.MetaSignature) + require.Equal(t, v.OriginSignature, res.OriginSignature) + require.Equal(t, v.Origin, res.Origin) +} + +func TestResponseVerificationHeader(t *testing.T) { + v := &session.ResponseVerificationHeader{ + BodySignature: &refs.Signature{Key: []byte("any_pubkey1"), Sign: []byte("any_signature1"), Scheme: 1}, + MetaSignature: &refs.Signature{Key: []byte("any_pubkey2"), Sign: []byte("any_signature2"), Scheme: 2}, + OriginSignature: &refs.Signature{Key: []byte("any_pubkey3"), Sign: []byte("any_signature3"), Scheme: 3}, + Origin: &session.ResponseVerificationHeader{ + BodySignature: &refs.Signature{Key: []byte("any_pubkey100"), Sign: []byte("any_signature100"), Scheme: 100}, + MetaSignature: &refs.Signature{Key: []byte("any_pubkey101"), Sign: []byte("any_signature101"), Scheme: 101}, + OriginSignature: &refs.Signature{Key: []byte("any_pubkey102"), Sign: []byte("any_signature102"), Scheme: 102}, + }, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res session.ResponseVerificationHeader + err := proto.Unmarshal(b, &res) + 
require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.BodySignature, res.BodySignature) + require.Equal(t, v.MetaSignature, res.MetaSignature) + require.Equal(t, v.OriginSignature, res.OriginSignature) + require.Equal(t, v.Origin, res.Origin) +} diff --git a/api/session/service.pb.go b/api/session/service.pb.go new file mode 100644 index 000000000..ff6b811f8 --- /dev/null +++ b/api/session/service.pb.go @@ -0,0 +1,458 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.32.0 +// protoc v4.25.1 +// source: session/grpc/service.proto + +package session + +import ( + reflect "reflect" + sync "sync" + + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Information necessary for opening a session. +type CreateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of a create session token request message. + Body *CreateRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries request meta information. Header data is used only to regulate + // message transport and does not affect request execution. + MetaHeader *RequestMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries request verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. 
+ VerifyHeader *RequestVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *CreateRequest) Reset() { + *x = CreateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_session_grpc_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateRequest) ProtoMessage() {} + +func (x *CreateRequest) ProtoReflect() protoreflect.Message { + mi := &file_session_grpc_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateRequest.ProtoReflect.Descriptor instead. +func (*CreateRequest) Descriptor() ([]byte, []int) { + return file_session_grpc_service_proto_rawDescGZIP(), []int{0} +} + +func (x *CreateRequest) GetBody() *CreateRequest_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *CreateRequest) GetMetaHeader() *RequestMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *CreateRequest) GetVerifyHeader() *RequestVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// Information about the opened session. +type CreateResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Body of create session token response message. + Body *CreateResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Carries response meta information. Header data is used only to regulate + // message transport and does not affect request execution. 
+ MetaHeader *ResponseMetaHeader `protobuf:"bytes,2,opt,name=meta_header,json=metaHeader,proto3" json:"meta_header,omitempty"` + // Carries response verification information. This header is used to + // authenticate the nodes of the message route and check the correctness of + // transmission. + VerifyHeader *ResponseVerificationHeader `protobuf:"bytes,3,opt,name=verify_header,json=verifyHeader,proto3" json:"verify_header,omitempty"` +} + +func (x *CreateResponse) Reset() { + *x = CreateResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_session_grpc_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateResponse) ProtoMessage() {} + +func (x *CreateResponse) ProtoReflect() protoreflect.Message { + mi := &file_session_grpc_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateResponse.ProtoReflect.Descriptor instead. 
+func (*CreateResponse) Descriptor() ([]byte, []int) { + return file_session_grpc_service_proto_rawDescGZIP(), []int{1} +} + +func (x *CreateResponse) GetBody() *CreateResponse_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *CreateResponse) GetMetaHeader() *ResponseMetaHeader { + if x != nil { + return x.MetaHeader + } + return nil +} + +func (x *CreateResponse) GetVerifyHeader() *ResponseVerificationHeader { + if x != nil { + return x.VerifyHeader + } + return nil +} + +// Session creation request body +type CreateRequest_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Session initiating user's or node's key derived `OwnerID` + OwnerId *refs.OwnerID `protobuf:"bytes,1,opt,name=owner_id,json=ownerId,proto3" json:"owner_id,omitempty"` + // Session expiration `Epoch` + Expiration uint64 `protobuf:"varint,2,opt,name=expiration,proto3" json:"expiration,omitempty"` +} + +func (x *CreateRequest_Body) Reset() { + *x = CreateRequest_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_session_grpc_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateRequest_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateRequest_Body) ProtoMessage() {} + +func (x *CreateRequest_Body) ProtoReflect() protoreflect.Message { + mi := &file_session_grpc_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateRequest_Body.ProtoReflect.Descriptor instead. 
+func (*CreateRequest_Body) Descriptor() ([]byte, []int) { + return file_session_grpc_service_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *CreateRequest_Body) GetOwnerId() *refs.OwnerID { + if x != nil { + return x.OwnerId + } + return nil +} + +func (x *CreateRequest_Body) GetExpiration() uint64 { + if x != nil { + return x.Expiration + } + return 0 +} + +// Session creation response body +type CreateResponse_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifier of a newly created session + Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Public key used for session + SessionKey []byte `protobuf:"bytes,2,opt,name=session_key,json=sessionKey,proto3" json:"session_key,omitempty"` +} + +func (x *CreateResponse_Body) Reset() { + *x = CreateResponse_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_session_grpc_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateResponse_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateResponse_Body) ProtoMessage() {} + +func (x *CreateResponse_Body) ProtoReflect() protoreflect.Message { + mi := &file_session_grpc_service_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateResponse_Body.ProtoReflect.Descriptor instead. 
+func (*CreateResponse_Body) Descriptor() ([]byte, []int) { + return file_session_grpc_service_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *CreateResponse_Body) GetId() []byte { + if x != nil { + return x.Id + } + return nil +} + +func (x *CreateResponse_Body) GetSessionKey() []byte { + if x != nil { + return x.SessionKey + } + return nil +} + +var File_session_grpc_service_proto protoreflect.FileDescriptor + +var file_session_grpc_service_proto_rawDesc = []byte{ + 0x0a, 0x1a, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x6e, 0x65, + 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x1a, + 0x15, 0x72, 0x65, 0x66, 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2f, + 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0xc0, 0x02, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x39, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x25, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, + 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, + 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x12, 0x51, 0x0a, 0x0d, 0x76, 
0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6e, 0x65, + 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, + 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x5a, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, + 0x32, 0x0a, 0x08, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, + 0x66, 0x73, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x52, 0x07, 0x6f, 0x77, 0x6e, 0x65, + 0x72, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x22, 0xa1, 0x02, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, + 0x64, 0x79, 0x12, 0x46, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, + 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x0a, + 0x6d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x52, 0x0a, 0x0d, 0x76, 
0x65, + 0x72, 0x69, 0x66, 0x79, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x37, + 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x73, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x32, 0x5f, 0x0a, 0x0e, 0x53, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x4d, 0x0a, 0x06, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x12, 0x20, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, + 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x59, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6e, 0x73, 0x70, 0x63, 0x63, 0x2d, 0x64, 0x65, 0x76, + 0x2f, 0x6e, 0x65, 0x6f, 0x66, 0x73, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, + 0x2f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x3b, 0x73, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0xaa, 0x02, 0x1b, 0x4e, 0x65, 0x6f, 0x2e, 0x46, 0x69, 0x6c, 0x65, + 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x41, 0x50, 0x49, 0x2e, 0x53, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x62, 0x06, 
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_session_grpc_service_proto_rawDescOnce sync.Once + file_session_grpc_service_proto_rawDescData = file_session_grpc_service_proto_rawDesc +) + +func file_session_grpc_service_proto_rawDescGZIP() []byte { + file_session_grpc_service_proto_rawDescOnce.Do(func() { + file_session_grpc_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_session_grpc_service_proto_rawDescData) + }) + return file_session_grpc_service_proto_rawDescData +} + +var file_session_grpc_service_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_session_grpc_service_proto_goTypes = []interface{}{ + (*CreateRequest)(nil), // 0: neo.fs.v2.session.CreateRequest + (*CreateResponse)(nil), // 1: neo.fs.v2.session.CreateResponse + (*CreateRequest_Body)(nil), // 2: neo.fs.v2.session.CreateRequest.Body + (*CreateResponse_Body)(nil), // 3: neo.fs.v2.session.CreateResponse.Body + (*RequestMetaHeader)(nil), // 4: neo.fs.v2.session.RequestMetaHeader + (*RequestVerificationHeader)(nil), // 5: neo.fs.v2.session.RequestVerificationHeader + (*ResponseMetaHeader)(nil), // 6: neo.fs.v2.session.ResponseMetaHeader + (*ResponseVerificationHeader)(nil), // 7: neo.fs.v2.session.ResponseVerificationHeader + (*refs.OwnerID)(nil), // 8: neo.fs.v2.refs.OwnerID +} +var file_session_grpc_service_proto_depIdxs = []int32{ + 2, // 0: neo.fs.v2.session.CreateRequest.body:type_name -> neo.fs.v2.session.CreateRequest.Body + 4, // 1: neo.fs.v2.session.CreateRequest.meta_header:type_name -> neo.fs.v2.session.RequestMetaHeader + 5, // 2: neo.fs.v2.session.CreateRequest.verify_header:type_name -> neo.fs.v2.session.RequestVerificationHeader + 3, // 3: neo.fs.v2.session.CreateResponse.body:type_name -> neo.fs.v2.session.CreateResponse.Body + 6, // 4: neo.fs.v2.session.CreateResponse.meta_header:type_name -> neo.fs.v2.session.ResponseMetaHeader + 7, // 5: neo.fs.v2.session.CreateResponse.verify_header:type_name -> neo.fs.v2.session.ResponseVerificationHeader + 8, // 6: 
neo.fs.v2.session.CreateRequest.Body.owner_id:type_name -> neo.fs.v2.refs.OwnerID + 0, // 7: neo.fs.v2.session.SessionService.Create:input_type -> neo.fs.v2.session.CreateRequest + 1, // 8: neo.fs.v2.session.SessionService.Create:output_type -> neo.fs.v2.session.CreateResponse + 8, // [8:9] is the sub-list for method output_type + 7, // [7:8] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { file_session_grpc_service_proto_init() } +func file_session_grpc_service_proto_init() { + if File_session_grpc_service_proto != nil { + return + } + file_session_grpc_types_proto_init() + if !protoimpl.UnsafeEnabled { + file_session_grpc_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_session_grpc_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_session_grpc_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateRequest_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_session_grpc_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateResponse_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: 
file_session_grpc_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_session_grpc_service_proto_goTypes, + DependencyIndexes: file_session_grpc_service_proto_depIdxs, + MessageInfos: file_session_grpc_service_proto_msgTypes, + }.Build() + File_session_grpc_service_proto = out.File + file_session_grpc_service_proto_rawDesc = nil + file_session_grpc_service_proto_goTypes = nil + file_session_grpc_service_proto_depIdxs = nil +} diff --git a/api/session/service_grpc.pb.go b/api/session/service_grpc.pb.go new file mode 100644 index 000000000..041bdff67 --- /dev/null +++ b/api/session/service_grpc.pb.go @@ -0,0 +1,119 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.25.1 +// source: session/grpc/service.proto + +package session + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + SessionService_Create_FullMethodName = "/neo.fs.v2.session.SessionService/Create" +) + +// SessionServiceClient is the client API for SessionService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type SessionServiceClient interface { + // Open a new session between two peers. + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): + // session has been successfully opened; + // - Common failures (SECTION_FAILURE_COMMON). 
+ Create(ctx context.Context, in *CreateRequest, opts ...grpc.CallOption) (*CreateResponse, error) +} + +type sessionServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewSessionServiceClient(cc grpc.ClientConnInterface) SessionServiceClient { + return &sessionServiceClient{cc} +} + +func (c *sessionServiceClient) Create(ctx context.Context, in *CreateRequest, opts ...grpc.CallOption) (*CreateResponse, error) { + out := new(CreateResponse) + err := c.cc.Invoke(ctx, SessionService_Create_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// SessionServiceServer is the server API for SessionService service. +// All implementations should embed UnimplementedSessionServiceServer +// for forward compatibility +type SessionServiceServer interface { + // Open a new session between two peers. + // + // Statuses: + // - **OK** (0, SECTION_SUCCESS): + // session has been successfully opened; + // - Common failures (SECTION_FAILURE_COMMON). + Create(context.Context, *CreateRequest) (*CreateResponse, error) +} + +// UnimplementedSessionServiceServer should be embedded to have forward compatible implementations. +type UnimplementedSessionServiceServer struct { +} + +func (UnimplementedSessionServiceServer) Create(context.Context, *CreateRequest) (*CreateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Create not implemented") +} + +// UnsafeSessionServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to SessionServiceServer will +// result in compilation errors. 
+type UnsafeSessionServiceServer interface { + mustEmbedUnimplementedSessionServiceServer() +} + +func RegisterSessionServiceServer(s grpc.ServiceRegistrar, srv SessionServiceServer) { + s.RegisterService(&SessionService_ServiceDesc, srv) +} + +func _SessionService_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SessionServiceServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SessionService_Create_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SessionServiceServer).Create(ctx, req.(*CreateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// SessionService_ServiceDesc is the grpc.ServiceDesc for SessionService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var SessionService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "neo.fs.v2.session.SessionService", + HandlerType: (*SessionServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Create", + Handler: _SessionService_Create_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "session/grpc/service.proto", +} diff --git a/api/session/types.pb.go b/api/session/types.pb.go new file mode 100644 index 000000000..d4bdf62bd --- /dev/null +++ b/api/session/types.pb.go @@ -0,0 +1,1444 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.32.0 +// protoc v4.25.1 +// source: session/grpc/types.proto + +package session + +import ( + reflect "reflect" + sync "sync" + + "github.com/nspcc-dev/neofs-sdk-go/api/acl" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Object request verbs +type ObjectSessionContext_Verb int32 + +const ( + // Unknown verb + ObjectSessionContext_VERB_UNSPECIFIED ObjectSessionContext_Verb = 0 + // Refers to object.Put RPC call + ObjectSessionContext_PUT ObjectSessionContext_Verb = 1 + // Refers to object.Get RPC call + ObjectSessionContext_GET ObjectSessionContext_Verb = 2 + // Refers to object.Head RPC call + ObjectSessionContext_HEAD ObjectSessionContext_Verb = 3 + // Refers to object.Search RPC call + ObjectSessionContext_SEARCH ObjectSessionContext_Verb = 4 + // Refers to object.Delete RPC call + ObjectSessionContext_DELETE ObjectSessionContext_Verb = 5 + // Refers to object.GetRange RPC call + ObjectSessionContext_RANGE ObjectSessionContext_Verb = 6 + // Refers to object.GetRangeHash RPC call + ObjectSessionContext_RANGEHASH ObjectSessionContext_Verb = 7 +) + +// Enum value maps for ObjectSessionContext_Verb. 
+var ( + ObjectSessionContext_Verb_name = map[int32]string{ + 0: "VERB_UNSPECIFIED", + 1: "PUT", + 2: "GET", + 3: "HEAD", + 4: "SEARCH", + 5: "DELETE", + 6: "RANGE", + 7: "RANGEHASH", + } + ObjectSessionContext_Verb_value = map[string]int32{ + "VERB_UNSPECIFIED": 0, + "PUT": 1, + "GET": 2, + "HEAD": 3, + "SEARCH": 4, + "DELETE": 5, + "RANGE": 6, + "RANGEHASH": 7, + } +) + +func (x ObjectSessionContext_Verb) Enum() *ObjectSessionContext_Verb { + p := new(ObjectSessionContext_Verb) + *p = x + return p +} + +func (x ObjectSessionContext_Verb) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ObjectSessionContext_Verb) Descriptor() protoreflect.EnumDescriptor { + return file_session_grpc_types_proto_enumTypes[0].Descriptor() +} + +func (ObjectSessionContext_Verb) Type() protoreflect.EnumType { + return &file_session_grpc_types_proto_enumTypes[0] +} + +func (x ObjectSessionContext_Verb) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ObjectSessionContext_Verb.Descriptor instead. +func (ObjectSessionContext_Verb) EnumDescriptor() ([]byte, []int) { + return file_session_grpc_types_proto_rawDescGZIP(), []int{0, 0} +} + +// Container request verbs +type ContainerSessionContext_Verb int32 + +const ( + // Unknown verb + ContainerSessionContext_VERB_UNSPECIFIED ContainerSessionContext_Verb = 0 + // Refers to container.Put RPC call + ContainerSessionContext_PUT ContainerSessionContext_Verb = 1 + // Refers to container.Delete RPC call + ContainerSessionContext_DELETE ContainerSessionContext_Verb = 2 + // Refers to container.SetExtendedACL RPC call + ContainerSessionContext_SETEACL ContainerSessionContext_Verb = 3 +) + +// Enum value maps for ContainerSessionContext_Verb. 
+var ( + ContainerSessionContext_Verb_name = map[int32]string{ + 0: "VERB_UNSPECIFIED", + 1: "PUT", + 2: "DELETE", + 3: "SETEACL", + } + ContainerSessionContext_Verb_value = map[string]int32{ + "VERB_UNSPECIFIED": 0, + "PUT": 1, + "DELETE": 2, + "SETEACL": 3, + } +) + +func (x ContainerSessionContext_Verb) Enum() *ContainerSessionContext_Verb { + p := new(ContainerSessionContext_Verb) + *p = x + return p +} + +func (x ContainerSessionContext_Verb) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ContainerSessionContext_Verb) Descriptor() protoreflect.EnumDescriptor { + return file_session_grpc_types_proto_enumTypes[1].Descriptor() +} + +func (ContainerSessionContext_Verb) Type() protoreflect.EnumType { + return &file_session_grpc_types_proto_enumTypes[1] +} + +func (x ContainerSessionContext_Verb) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ContainerSessionContext_Verb.Descriptor instead. +func (ContainerSessionContext_Verb) EnumDescriptor() ([]byte, []int) { + return file_session_grpc_types_proto_rawDescGZIP(), []int{1, 0} +} + +// Context information for Session Tokens related to ObjectService requests +type ObjectSessionContext struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Type of request for which the token is issued + Verb ObjectSessionContext_Verb `protobuf:"varint,1,opt,name=verb,proto3,enum=neo.fs.v2.session.ObjectSessionContext_Verb" json:"verb,omitempty"` + // Object session target. MUST be correctly formed and set. If `objects` + // field is not empty, then the session applies only to these elements, + // otherwise, to all objects from the specified container. 
+ Target *ObjectSessionContext_Target `protobuf:"bytes,2,opt,name=target,proto3" json:"target,omitempty"` +} + +func (x *ObjectSessionContext) Reset() { + *x = ObjectSessionContext{} + if protoimpl.UnsafeEnabled { + mi := &file_session_grpc_types_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ObjectSessionContext) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ObjectSessionContext) ProtoMessage() {} + +func (x *ObjectSessionContext) ProtoReflect() protoreflect.Message { + mi := &file_session_grpc_types_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ObjectSessionContext.ProtoReflect.Descriptor instead. +func (*ObjectSessionContext) Descriptor() ([]byte, []int) { + return file_session_grpc_types_proto_rawDescGZIP(), []int{0} +} + +func (x *ObjectSessionContext) GetVerb() ObjectSessionContext_Verb { + if x != nil { + return x.Verb + } + return ObjectSessionContext_VERB_UNSPECIFIED +} + +func (x *ObjectSessionContext) GetTarget() *ObjectSessionContext_Target { + if x != nil { + return x.Target + } + return nil +} + +// Context information for Session Tokens related to ContainerService requests. +type ContainerSessionContext struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Type of request for which the token is issued + Verb ContainerSessionContext_Verb `protobuf:"varint,1,opt,name=verb,proto3,enum=neo.fs.v2.session.ContainerSessionContext_Verb" json:"verb,omitempty"` + // Spreads the action to all owner containers. + // If set, container_id field is ignored. + Wildcard bool `protobuf:"varint,2,opt,name=wildcard,proto3" json:"wildcard,omitempty"` + // Particular container to which the action applies. 
+ // Ignored if wildcard flag is set. + ContainerId *refs.ContainerID `protobuf:"bytes,3,opt,name=container_id,json=containerID,proto3" json:"container_id,omitempty"` +} + +func (x *ContainerSessionContext) Reset() { + *x = ContainerSessionContext{} + if protoimpl.UnsafeEnabled { + mi := &file_session_grpc_types_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ContainerSessionContext) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ContainerSessionContext) ProtoMessage() {} + +func (x *ContainerSessionContext) ProtoReflect() protoreflect.Message { + mi := &file_session_grpc_types_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ContainerSessionContext.ProtoReflect.Descriptor instead. +func (*ContainerSessionContext) Descriptor() ([]byte, []int) { + return file_session_grpc_types_proto_rawDescGZIP(), []int{1} +} + +func (x *ContainerSessionContext) GetVerb() ContainerSessionContext_Verb { + if x != nil { + return x.Verb + } + return ContainerSessionContext_VERB_UNSPECIFIED +} + +func (x *ContainerSessionContext) GetWildcard() bool { + if x != nil { + return x.Wildcard + } + return false +} + +func (x *ContainerSessionContext) GetContainerId() *refs.ContainerID { + if x != nil { + return x.ContainerId + } + return nil +} + +// NeoFS Session Token. +type SessionToken struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Session Token contains the proof of trust between peers to be attached in + // requests for further verification. Please see corresponding section of + // NeoFS Technical Specification for details. 
+ Body *SessionToken_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` + // Signature of `SessionToken` information + Signature *refs.Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` +} + +func (x *SessionToken) Reset() { + *x = SessionToken{} + if protoimpl.UnsafeEnabled { + mi := &file_session_grpc_types_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SessionToken) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionToken) ProtoMessage() {} + +func (x *SessionToken) ProtoReflect() protoreflect.Message { + mi := &file_session_grpc_types_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionToken.ProtoReflect.Descriptor instead. +func (*SessionToken) Descriptor() ([]byte, []int) { + return file_session_grpc_types_proto_rawDescGZIP(), []int{2} +} + +func (x *SessionToken) GetBody() *SessionToken_Body { + if x != nil { + return x.Body + } + return nil +} + +func (x *SessionToken) GetSignature() *refs.Signature { + if x != nil { + return x.Signature + } + return nil +} + +// Extended headers for Request/Response. They may contain any user-defined headers +// to be interpreted on application level. +// +// Key name must be a unique valid UTF-8 string. Value can't be empty. Requests or +// Responses with duplicated header names or headers with empty values will be +// considered invalid. +// +// There are some "well-known" headers starting with `__NEOFS__` prefix that +// affect system behaviour: +// +// - __NEOFS__NETMAP_EPOCH \ +// Netmap epoch to use for object placement calculation. The `value` is string +// encoded `uint64` in decimal presentation. If set to '0' or not set, the +// current epoch only will be used. 
DEPRECATED: header ignored by servers. +// - __NEOFS__NETMAP_LOOKUP_DEPTH \ +// If object can't be found using current epoch's netmap, this header limits +// how many past epochs the node can look up through. The `value` is string +// encoded `uint64` in decimal presentation. If set to '0' or not set, only the +// current epoch will be used. DEPRECATED: header ignored by servers. +type XHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Key of the X-Header + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // Value of the X-Header + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *XHeader) Reset() { + *x = XHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_session_grpc_types_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *XHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*XHeader) ProtoMessage() {} + +func (x *XHeader) ProtoReflect() protoreflect.Message { + mi := &file_session_grpc_types_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use XHeader.ProtoReflect.Descriptor instead. +func (*XHeader) Descriptor() ([]byte, []int) { + return file_session_grpc_types_proto_rawDescGZIP(), []int{3} +} + +func (x *XHeader) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *XHeader) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +// Meta information attached to the request. When forwarded between peers, +// request meta headers are folded in matryoshka style. 
+type RequestMetaHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Peer's API version used + Version *refs.Version `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Peer's local epoch number. Set to 0 if unknown. + Epoch uint64 `protobuf:"varint,2,opt,name=epoch,proto3" json:"epoch,omitempty"` + // Maximum number of intermediate nodes in the request route + Ttl uint32 `protobuf:"varint,3,opt,name=ttl,proto3" json:"ttl,omitempty"` + // Request X-Headers + XHeaders []*XHeader `protobuf:"bytes,4,rep,name=x_headers,json=xHeaders,proto3" json:"x_headers,omitempty"` + // Session token within which the request is sent + SessionToken *SessionToken `protobuf:"bytes,5,opt,name=session_token,json=sessionToken,proto3" json:"session_token,omitempty"` + // `BearerToken` with eACL overrides for the request + BearerToken *acl.BearerToken `protobuf:"bytes,6,opt,name=bearer_token,json=bearerToken,proto3" json:"bearer_token,omitempty"` + // `RequestMetaHeader` of the origin request + Origin *RequestMetaHeader `protobuf:"bytes,7,opt,name=origin,proto3" json:"origin,omitempty"` + // NeoFS network magic. Must match the value for the network + // that the server belongs to. 
+ MagicNumber uint64 `protobuf:"varint,8,opt,name=magic_number,json=magicNumber,proto3" json:"magic_number,omitempty"` +} + +func (x *RequestMetaHeader) Reset() { + *x = RequestMetaHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_session_grpc_types_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestMetaHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestMetaHeader) ProtoMessage() {} + +func (x *RequestMetaHeader) ProtoReflect() protoreflect.Message { + mi := &file_session_grpc_types_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestMetaHeader.ProtoReflect.Descriptor instead. +func (*RequestMetaHeader) Descriptor() ([]byte, []int) { + return file_session_grpc_types_proto_rawDescGZIP(), []int{4} +} + +func (x *RequestMetaHeader) GetVersion() *refs.Version { + if x != nil { + return x.Version + } + return nil +} + +func (x *RequestMetaHeader) GetEpoch() uint64 { + if x != nil { + return x.Epoch + } + return 0 +} + +func (x *RequestMetaHeader) GetTtl() uint32 { + if x != nil { + return x.Ttl + } + return 0 +} + +func (x *RequestMetaHeader) GetXHeaders() []*XHeader { + if x != nil { + return x.XHeaders + } + return nil +} + +func (x *RequestMetaHeader) GetSessionToken() *SessionToken { + if x != nil { + return x.SessionToken + } + return nil +} + +func (x *RequestMetaHeader) GetBearerToken() *acl.BearerToken { + if x != nil { + return x.BearerToken + } + return nil +} + +func (x *RequestMetaHeader) GetOrigin() *RequestMetaHeader { + if x != nil { + return x.Origin + } + return nil +} + +func (x *RequestMetaHeader) GetMagicNumber() uint64 { + if x != nil { + return x.MagicNumber + } + return 0 +} + +// Information about the response +type 
ResponseMetaHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Peer's API version used + Version *refs.Version `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Peer's local epoch number + Epoch uint64 `protobuf:"varint,2,opt,name=epoch,proto3" json:"epoch,omitempty"` + // Maximum number of intermediate nodes in the request route + Ttl uint32 `protobuf:"varint,3,opt,name=ttl,proto3" json:"ttl,omitempty"` + // Response X-Headers + XHeaders []*XHeader `protobuf:"bytes,4,rep,name=x_headers,json=xHeaders,proto3" json:"x_headers,omitempty"` + // `ResponseMetaHeader` of the origin request + Origin *ResponseMetaHeader `protobuf:"bytes,5,opt,name=origin,proto3" json:"origin,omitempty"` + // Status return + Status *status.Status `protobuf:"bytes,6,opt,name=status,proto3" json:"status,omitempty"` +} + +func (x *ResponseMetaHeader) Reset() { + *x = ResponseMetaHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_session_grpc_types_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResponseMetaHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResponseMetaHeader) ProtoMessage() {} + +func (x *ResponseMetaHeader) ProtoReflect() protoreflect.Message { + mi := &file_session_grpc_types_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResponseMetaHeader.ProtoReflect.Descriptor instead. 
+func (*ResponseMetaHeader) Descriptor() ([]byte, []int) { + return file_session_grpc_types_proto_rawDescGZIP(), []int{5} +} + +func (x *ResponseMetaHeader) GetVersion() *refs.Version { + if x != nil { + return x.Version + } + return nil +} + +func (x *ResponseMetaHeader) GetEpoch() uint64 { + if x != nil { + return x.Epoch + } + return 0 +} + +func (x *ResponseMetaHeader) GetTtl() uint32 { + if x != nil { + return x.Ttl + } + return 0 +} + +func (x *ResponseMetaHeader) GetXHeaders() []*XHeader { + if x != nil { + return x.XHeaders + } + return nil +} + +func (x *ResponseMetaHeader) GetOrigin() *ResponseMetaHeader { + if x != nil { + return x.Origin + } + return nil +} + +func (x *ResponseMetaHeader) GetStatus() *status.Status { + if x != nil { + return x.Status + } + return nil +} + +// Verification info for the request signed by all intermediate nodes. +type RequestVerificationHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Request Body signature. Should be generated once by the request initiator. 
+ BodySignature *refs.Signature `protobuf:"bytes,1,opt,name=body_signature,json=bodySignature,proto3" json:"body_signature,omitempty"` + // Request Meta signature is added and signed by each intermediate node + MetaSignature *refs.Signature `protobuf:"bytes,2,opt,name=meta_signature,json=metaSignature,proto3" json:"meta_signature,omitempty"` + // Signature of previous hops + OriginSignature *refs.Signature `protobuf:"bytes,3,opt,name=origin_signature,json=originSignature,proto3" json:"origin_signature,omitempty"` + // Chain of previous hops signatures + Origin *RequestVerificationHeader `protobuf:"bytes,4,opt,name=origin,proto3" json:"origin,omitempty"` +} + +func (x *RequestVerificationHeader) Reset() { + *x = RequestVerificationHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_session_grpc_types_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestVerificationHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestVerificationHeader) ProtoMessage() {} + +func (x *RequestVerificationHeader) ProtoReflect() protoreflect.Message { + mi := &file_session_grpc_types_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestVerificationHeader.ProtoReflect.Descriptor instead. 
+func (*RequestVerificationHeader) Descriptor() ([]byte, []int) { + return file_session_grpc_types_proto_rawDescGZIP(), []int{6} +} + +func (x *RequestVerificationHeader) GetBodySignature() *refs.Signature { + if x != nil { + return x.BodySignature + } + return nil +} + +func (x *RequestVerificationHeader) GetMetaSignature() *refs.Signature { + if x != nil { + return x.MetaSignature + } + return nil +} + +func (x *RequestVerificationHeader) GetOriginSignature() *refs.Signature { + if x != nil { + return x.OriginSignature + } + return nil +} + +func (x *RequestVerificationHeader) GetOrigin() *RequestVerificationHeader { + if x != nil { + return x.Origin + } + return nil +} + +// Verification info for the response signed by all intermediate nodes +type ResponseVerificationHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Response Body signature. Should be generated once by an answering node. + BodySignature *refs.Signature `protobuf:"bytes,1,opt,name=body_signature,json=bodySignature,proto3" json:"body_signature,omitempty"` + // Response Meta signature is added and signed by each intermediate node + MetaSignature *refs.Signature `protobuf:"bytes,2,opt,name=meta_signature,json=metaSignature,proto3" json:"meta_signature,omitempty"` + // Signature of previous hops + OriginSignature *refs.Signature `protobuf:"bytes,3,opt,name=origin_signature,json=originSignature,proto3" json:"origin_signature,omitempty"` + // Chain of previous hops signatures + Origin *ResponseVerificationHeader `protobuf:"bytes,4,opt,name=origin,proto3" json:"origin,omitempty"` +} + +func (x *ResponseVerificationHeader) Reset() { + *x = ResponseVerificationHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_session_grpc_types_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResponseVerificationHeader) String() string { + return 
protoimpl.X.MessageStringOf(x) +} + +func (*ResponseVerificationHeader) ProtoMessage() {} + +func (x *ResponseVerificationHeader) ProtoReflect() protoreflect.Message { + mi := &file_session_grpc_types_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResponseVerificationHeader.ProtoReflect.Descriptor instead. +func (*ResponseVerificationHeader) Descriptor() ([]byte, []int) { + return file_session_grpc_types_proto_rawDescGZIP(), []int{7} +} + +func (x *ResponseVerificationHeader) GetBodySignature() *refs.Signature { + if x != nil { + return x.BodySignature + } + return nil +} + +func (x *ResponseVerificationHeader) GetMetaSignature() *refs.Signature { + if x != nil { + return x.MetaSignature + } + return nil +} + +func (x *ResponseVerificationHeader) GetOriginSignature() *refs.Signature { + if x != nil { + return x.OriginSignature + } + return nil +} + +func (x *ResponseVerificationHeader) GetOrigin() *ResponseVerificationHeader { + if x != nil { + return x.Origin + } + return nil +} + +// Carries objects involved in the object session. +type ObjectSessionContext_Target struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Indicates which container the session is spread to. Field MUST be set + // and correct. + Container *refs.ContainerID `protobuf:"bytes,1,opt,name=container,proto3" json:"container,omitempty"` + // Indicates which objects the session is spread to. Objects are expected + // to be stored in the NeoFS container referenced by `container` field. + // Each element MUST have correct format. 
+ Objects []*refs.ObjectID `protobuf:"bytes,2,rep,name=objects,proto3" json:"objects,omitempty"` +} + +func (x *ObjectSessionContext_Target) Reset() { + *x = ObjectSessionContext_Target{} + if protoimpl.UnsafeEnabled { + mi := &file_session_grpc_types_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ObjectSessionContext_Target) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ObjectSessionContext_Target) ProtoMessage() {} + +func (x *ObjectSessionContext_Target) ProtoReflect() protoreflect.Message { + mi := &file_session_grpc_types_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ObjectSessionContext_Target.ProtoReflect.Descriptor instead. +func (*ObjectSessionContext_Target) Descriptor() ([]byte, []int) { + return file_session_grpc_types_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *ObjectSessionContext_Target) GetContainer() *refs.ContainerID { + if x != nil { + return x.Container + } + return nil +} + +func (x *ObjectSessionContext_Target) GetObjects() []*refs.ObjectID { + if x != nil { + return x.Objects + } + return nil +} + +// Session Token body +type SessionToken_Body struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Token identifier is a valid UUIDv4 in binary form + Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Identifier of the session initiator + OwnerId *refs.OwnerID `protobuf:"bytes,2,opt,name=owner_id,json=ownerID,proto3" json:"owner_id,omitempty"` + // Lifetime of the session + Lifetime *SessionToken_Body_TokenLifetime `protobuf:"bytes,3,opt,name=lifetime,proto3" json:"lifetime,omitempty"` + // Public key used in session + SessionKey []byte 
`protobuf:"bytes,4,opt,name=session_key,json=sessionKey,proto3" json:"session_key,omitempty"` + // Session Context information + // + // Types that are assignable to Context: + // + // *SessionToken_Body_Object + // *SessionToken_Body_Container + Context isSessionToken_Body_Context `protobuf_oneof:"context"` +} + +func (x *SessionToken_Body) Reset() { + *x = SessionToken_Body{} + if protoimpl.UnsafeEnabled { + mi := &file_session_grpc_types_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SessionToken_Body) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionToken_Body) ProtoMessage() {} + +func (x *SessionToken_Body) ProtoReflect() protoreflect.Message { + mi := &file_session_grpc_types_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionToken_Body.ProtoReflect.Descriptor instead. 
+func (*SessionToken_Body) Descriptor() ([]byte, []int) { + return file_session_grpc_types_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *SessionToken_Body) GetId() []byte { + if x != nil { + return x.Id + } + return nil +} + +func (x *SessionToken_Body) GetOwnerId() *refs.OwnerID { + if x != nil { + return x.OwnerId + } + return nil +} + +func (x *SessionToken_Body) GetLifetime() *SessionToken_Body_TokenLifetime { + if x != nil { + return x.Lifetime + } + return nil +} + +func (x *SessionToken_Body) GetSessionKey() []byte { + if x != nil { + return x.SessionKey + } + return nil +} + +func (m *SessionToken_Body) GetContext() isSessionToken_Body_Context { + if m != nil { + return m.Context + } + return nil +} + +func (x *SessionToken_Body) GetObject() *ObjectSessionContext { + if x, ok := x.GetContext().(*SessionToken_Body_Object); ok { + return x.Object + } + return nil +} + +func (x *SessionToken_Body) GetContainer() *ContainerSessionContext { + if x, ok := x.GetContext().(*SessionToken_Body_Container); ok { + return x.Container + } + return nil +} + +type isSessionToken_Body_Context interface { + isSessionToken_Body_Context() +} + +type SessionToken_Body_Object struct { + // ObjectService session context + Object *ObjectSessionContext `protobuf:"bytes,5,opt,name=object,proto3,oneof"` +} + +type SessionToken_Body_Container struct { + // ContainerService session context + Container *ContainerSessionContext `protobuf:"bytes,6,opt,name=container,proto3,oneof"` +} + +func (*SessionToken_Body_Object) isSessionToken_Body_Context() {} + +func (*SessionToken_Body_Container) isSessionToken_Body_Context() {} + +// Lifetime parameters of the token. Field names taken from rfc7519. 
+type SessionToken_Body_TokenLifetime struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Expiration Epoch + Exp uint64 `protobuf:"varint,1,opt,name=exp,proto3" json:"exp,omitempty"` + // Not valid before Epoch + Nbf uint64 `protobuf:"varint,2,opt,name=nbf,proto3" json:"nbf,omitempty"` + // Issued at Epoch + Iat uint64 `protobuf:"varint,3,opt,name=iat,proto3" json:"iat,omitempty"` +} + +func (x *SessionToken_Body_TokenLifetime) Reset() { + *x = SessionToken_Body_TokenLifetime{} + if protoimpl.UnsafeEnabled { + mi := &file_session_grpc_types_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SessionToken_Body_TokenLifetime) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionToken_Body_TokenLifetime) ProtoMessage() {} + +func (x *SessionToken_Body_TokenLifetime) ProtoReflect() protoreflect.Message { + mi := &file_session_grpc_types_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionToken_Body_TokenLifetime.ProtoReflect.Descriptor instead. 
+func (*SessionToken_Body_TokenLifetime) Descriptor() ([]byte, []int) { + return file_session_grpc_types_proto_rawDescGZIP(), []int{2, 0, 0} +} + +func (x *SessionToken_Body_TokenLifetime) GetExp() uint64 { + if x != nil { + return x.Exp + } + return 0 +} + +func (x *SessionToken_Body_TokenLifetime) GetNbf() uint64 { + if x != nil { + return x.Nbf + } + return 0 +} + +func (x *SessionToken_Body_TokenLifetime) GetIat() uint64 { + if x != nil { + return x.Iat + } + return 0 +} + +var File_session_grpc_types_proto protoreflect.FileDescriptor + +var file_session_grpc_types_proto_rawDesc = []byte{ + 0x0a, 0x18, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x6e, 0x65, 0x6f, 0x2e, + 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x15, 0x72, + 0x65, 0x66, 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x14, 0x61, 0x63, 0x6c, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x85, 0x03, 0x0a, 0x14, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x40, 0x0a, 0x04, + 0x76, 0x65, 0x72, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x6e, 0x65, 0x6f, + 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x2e, 0x56, 0x65, 0x72, 0x62, 0x52, 0x04, 0x76, 0x65, 0x72, 0x62, 0x12, 0x46, + 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, + 0x2e, 0x6e, 0x65, 0x6f, 
0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x1a, 0x77, 0x0a, 0x06, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x12, 0x39, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, + 0x72, 0x65, 0x66, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, + 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, 0x32, 0x0a, 0x07, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, + 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x49, 0x44, 0x52, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x22, + 0x6a, 0x0a, 0x04, 0x56, 0x65, 0x72, 0x62, 0x12, 0x14, 0x0a, 0x10, 0x56, 0x45, 0x52, 0x42, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, + 0x03, 0x50, 0x55, 0x54, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x54, 0x10, 0x02, 0x12, + 0x08, 0x0a, 0x04, 0x48, 0x45, 0x41, 0x44, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x45, 0x41, + 0x52, 0x43, 0x48, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, + 0x05, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x06, 0x12, 0x0d, 0x0a, 0x09, + 0x52, 0x41, 0x4e, 0x47, 0x45, 0x48, 0x41, 0x53, 0x48, 0x10, 0x07, 0x22, 0xfa, 0x01, 0x0a, 0x17, + 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x43, 0x0a, 0x04, 0x76, 0x65, 0x72, 0x62, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x6e, 0x65, 
0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x2e, 0x56, 0x65, 0x72, 0x62, 0x52, 0x04, 0x76, 0x65, 0x72, 0x62, 0x12, 0x1a, 0x0a, 0x08, + 0x77, 0x69, 0x6c, 0x64, 0x63, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, + 0x77, 0x69, 0x6c, 0x64, 0x63, 0x61, 0x72, 0x64, 0x12, 0x3e, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, + 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x52, 0x0b, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x22, 0x3e, 0x0a, 0x04, 0x56, 0x65, 0x72, 0x62, + 0x12, 0x14, 0x0a, 0x10, 0x56, 0x45, 0x52, 0x42, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x50, 0x55, 0x54, 0x10, 0x01, 0x12, + 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x53, + 0x45, 0x54, 0x45, 0x41, 0x43, 0x4c, 0x10, 0x03, 0x22, 0xa0, 0x04, 0x0a, 0x0c, 0x53, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x38, 0x0a, 0x04, 0x62, 0x6f, 0x64, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, + 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, + 0x6f, 0x64, 0x79, 0x12, 0x37, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, + 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 
0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x9c, 0x03, 0x0a, + 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x32, 0x0a, 0x08, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, + 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x44, + 0x52, 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, 0x4e, 0x0a, 0x08, 0x6c, 0x69, 0x66, + 0x65, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x6e, 0x65, + 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, + 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x2e, 0x42, 0x6f, 0x64, + 0x79, 0x2e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x4c, 0x69, 0x66, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x52, + 0x08, 0x6c, 0x69, 0x66, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, + 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x41, 0x0a, 0x06, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6e, 0x65, 0x6f, + 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x48, 0x00, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x4a, 0x0a, + 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2a, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x53, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x43, 
0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x48, 0x00, 0x52, 0x09, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x1a, 0x45, 0x0a, 0x0d, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x4c, 0x69, 0x66, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x78, + 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x65, 0x78, 0x70, 0x12, 0x10, 0x0a, 0x03, + 0x6e, 0x62, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x6e, 0x62, 0x66, 0x12, 0x10, + 0x0a, 0x03, 0x69, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x69, 0x61, 0x74, + 0x42, 0x09, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x31, 0x0a, 0x07, 0x58, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x8d, + 0x03, 0x0a, 0x11, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x12, 0x31, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x10, 0x0a, + 0x03, 0x74, 0x74, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, + 0x37, 0x0a, 0x09, 0x78, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x58, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x08, + 0x78, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x44, 0x0a, 0x0d, 
0x73, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1f, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x52, 0x0c, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x3d, + 0x0a, 0x0c, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x61, 0x63, 0x6c, 0x2e, 0x42, 0x65, 0x61, 0x72, 0x65, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x52, 0x0b, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x3c, 0x0a, + 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x52, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x6d, + 0x61, 0x67, 0x69, 0x63, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x0b, 0x6d, 0x61, 0x67, 0x69, 0x63, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x99, + 0x02, 0x0a, 0x12, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x31, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, + 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, + 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x10, + 0x0a, 0x03, 
0x74, 0x74, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x74, 0x74, 0x6c, + 0x12, 0x37, 0x0a, 0x09, 0x78, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, + 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x58, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, + 0x08, 0x78, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x3d, 0x0a, 0x06, 0x6f, 0x72, 0x69, + 0x67, 0x69, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, + 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x52, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, 0x30, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, + 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xab, 0x02, 0x0a, 0x19, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x40, 0x0a, 0x0e, 0x62, 0x6f, 0x64, 0x79, + 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, + 0x73, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x0d, 0x62, 0x6f, 0x64, + 0x79, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x40, 0x0a, 0x0e, 0x6d, 0x65, + 0x74, 0x61, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, + 0x65, 0x66, 0x73, 0x2e, 0x53, 0x69, 0x67, 0x6e, 
0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x0d, 0x6d, + 0x65, 0x74, 0x61, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x44, 0x0a, 0x10, + 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, + 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x12, 0x44, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x52, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x22, 0xad, 0x02, 0x0a, 0x1a, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x40, 0x0a, 0x0e, 0x62, 0x6f, 0x64, 0x79, 0x5f, + 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, + 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x0d, 0x62, 0x6f, 0x64, 0x79, + 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x40, 0x0a, 0x0e, 0x6d, 0x65, 0x74, + 0x61, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, + 0x66, 0x73, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x0d, 0x6d, 0x65, + 0x74, 0x61, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x44, 0x0a, 
0x10, 0x6f, + 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x12, 0x45, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2d, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x52, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x42, 0x59, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6e, 0x73, 0x70, 0x63, 0x63, 0x2d, 0x64, 0x65, 0x76, + 0x2f, 0x6e, 0x65, 0x6f, 0x66, 0x73, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, + 0x2f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x3b, 0x73, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0xaa, 0x02, 0x1b, 0x4e, 0x65, 0x6f, 0x2e, 0x46, 0x69, 0x6c, 0x65, + 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x41, 0x50, 0x49, 0x2e, 0x53, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_session_grpc_types_proto_rawDescOnce sync.Once + file_session_grpc_types_proto_rawDescData = file_session_grpc_types_proto_rawDesc +) + +func file_session_grpc_types_proto_rawDescGZIP() []byte { + file_session_grpc_types_proto_rawDescOnce.Do(func() { + file_session_grpc_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_session_grpc_types_proto_rawDescData) + }) + return file_session_grpc_types_proto_rawDescData +} + +var file_session_grpc_types_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var 
file_session_grpc_types_proto_msgTypes = make([]protoimpl.MessageInfo, 11) +var file_session_grpc_types_proto_goTypes = []interface{}{ + (ObjectSessionContext_Verb)(0), // 0: neo.fs.v2.session.ObjectSessionContext.Verb + (ContainerSessionContext_Verb)(0), // 1: neo.fs.v2.session.ContainerSessionContext.Verb + (*ObjectSessionContext)(nil), // 2: neo.fs.v2.session.ObjectSessionContext + (*ContainerSessionContext)(nil), // 3: neo.fs.v2.session.ContainerSessionContext + (*SessionToken)(nil), // 4: neo.fs.v2.session.SessionToken + (*XHeader)(nil), // 5: neo.fs.v2.session.XHeader + (*RequestMetaHeader)(nil), // 6: neo.fs.v2.session.RequestMetaHeader + (*ResponseMetaHeader)(nil), // 7: neo.fs.v2.session.ResponseMetaHeader + (*RequestVerificationHeader)(nil), // 8: neo.fs.v2.session.RequestVerificationHeader + (*ResponseVerificationHeader)(nil), // 9: neo.fs.v2.session.ResponseVerificationHeader + (*ObjectSessionContext_Target)(nil), // 10: neo.fs.v2.session.ObjectSessionContext.Target + (*SessionToken_Body)(nil), // 11: neo.fs.v2.session.SessionToken.Body + (*SessionToken_Body_TokenLifetime)(nil), // 12: neo.fs.v2.session.SessionToken.Body.TokenLifetime + (*refs.ContainerID)(nil), // 13: neo.fs.v2.refs.ContainerID + (*refs.Signature)(nil), // 14: neo.fs.v2.refs.Signature + (*refs.Version)(nil), // 15: neo.fs.v2.refs.Version + (*acl.BearerToken)(nil), // 16: neo.fs.v2.acl.BearerToken + (*status.Status)(nil), // 17: neo.fs.v2.status.Status + (*refs.ObjectID)(nil), // 18: neo.fs.v2.refs.ObjectID + (*refs.OwnerID)(nil), // 19: neo.fs.v2.refs.OwnerID +} +var file_session_grpc_types_proto_depIdxs = []int32{ + 0, // 0: neo.fs.v2.session.ObjectSessionContext.verb:type_name -> neo.fs.v2.session.ObjectSessionContext.Verb + 10, // 1: neo.fs.v2.session.ObjectSessionContext.target:type_name -> neo.fs.v2.session.ObjectSessionContext.Target + 1, // 2: neo.fs.v2.session.ContainerSessionContext.verb:type_name -> neo.fs.v2.session.ContainerSessionContext.Verb + 13, // 3: 
neo.fs.v2.session.ContainerSessionContext.container_id:type_name -> neo.fs.v2.refs.ContainerID + 11, // 4: neo.fs.v2.session.SessionToken.body:type_name -> neo.fs.v2.session.SessionToken.Body + 14, // 5: neo.fs.v2.session.SessionToken.signature:type_name -> neo.fs.v2.refs.Signature + 15, // 6: neo.fs.v2.session.RequestMetaHeader.version:type_name -> neo.fs.v2.refs.Version + 5, // 7: neo.fs.v2.session.RequestMetaHeader.x_headers:type_name -> neo.fs.v2.session.XHeader + 4, // 8: neo.fs.v2.session.RequestMetaHeader.session_token:type_name -> neo.fs.v2.session.SessionToken + 16, // 9: neo.fs.v2.session.RequestMetaHeader.bearer_token:type_name -> neo.fs.v2.acl.BearerToken + 6, // 10: neo.fs.v2.session.RequestMetaHeader.origin:type_name -> neo.fs.v2.session.RequestMetaHeader + 15, // 11: neo.fs.v2.session.ResponseMetaHeader.version:type_name -> neo.fs.v2.refs.Version + 5, // 12: neo.fs.v2.session.ResponseMetaHeader.x_headers:type_name -> neo.fs.v2.session.XHeader + 7, // 13: neo.fs.v2.session.ResponseMetaHeader.origin:type_name -> neo.fs.v2.session.ResponseMetaHeader + 17, // 14: neo.fs.v2.session.ResponseMetaHeader.status:type_name -> neo.fs.v2.status.Status + 14, // 15: neo.fs.v2.session.RequestVerificationHeader.body_signature:type_name -> neo.fs.v2.refs.Signature + 14, // 16: neo.fs.v2.session.RequestVerificationHeader.meta_signature:type_name -> neo.fs.v2.refs.Signature + 14, // 17: neo.fs.v2.session.RequestVerificationHeader.origin_signature:type_name -> neo.fs.v2.refs.Signature + 8, // 18: neo.fs.v2.session.RequestVerificationHeader.origin:type_name -> neo.fs.v2.session.RequestVerificationHeader + 14, // 19: neo.fs.v2.session.ResponseVerificationHeader.body_signature:type_name -> neo.fs.v2.refs.Signature + 14, // 20: neo.fs.v2.session.ResponseVerificationHeader.meta_signature:type_name -> neo.fs.v2.refs.Signature + 14, // 21: neo.fs.v2.session.ResponseVerificationHeader.origin_signature:type_name -> neo.fs.v2.refs.Signature + 9, // 22: 
neo.fs.v2.session.ResponseVerificationHeader.origin:type_name -> neo.fs.v2.session.ResponseVerificationHeader + 13, // 23: neo.fs.v2.session.ObjectSessionContext.Target.container:type_name -> neo.fs.v2.refs.ContainerID + 18, // 24: neo.fs.v2.session.ObjectSessionContext.Target.objects:type_name -> neo.fs.v2.refs.ObjectID + 19, // 25: neo.fs.v2.session.SessionToken.Body.owner_id:type_name -> neo.fs.v2.refs.OwnerID + 12, // 26: neo.fs.v2.session.SessionToken.Body.lifetime:type_name -> neo.fs.v2.session.SessionToken.Body.TokenLifetime + 2, // 27: neo.fs.v2.session.SessionToken.Body.object:type_name -> neo.fs.v2.session.ObjectSessionContext + 3, // 28: neo.fs.v2.session.SessionToken.Body.container:type_name -> neo.fs.v2.session.ContainerSessionContext + 29, // [29:29] is the sub-list for method output_type + 29, // [29:29] is the sub-list for method input_type + 29, // [29:29] is the sub-list for extension type_name + 29, // [29:29] is the sub-list for extension extendee + 0, // [0:29] is the sub-list for field type_name +} + +func init() { file_session_grpc_types_proto_init() } +func file_session_grpc_types_proto_init() { + if File_session_grpc_types_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_session_grpc_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ObjectSessionContext); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_session_grpc_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ContainerSessionContext); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_session_grpc_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionToken); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + 
default: + return nil + } + } + file_session_grpc_types_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*XHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_session_grpc_types_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestMetaHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_session_grpc_types_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResponseMetaHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_session_grpc_types_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestVerificationHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_session_grpc_types_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResponseVerificationHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_session_grpc_types_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ObjectSessionContext_Target); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_session_grpc_types_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionToken_Body); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_session_grpc_types_proto_msgTypes[10].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*SessionToken_Body_TokenLifetime); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_session_grpc_types_proto_msgTypes[9].OneofWrappers = []interface{}{ + (*SessionToken_Body_Object)(nil), + (*SessionToken_Body_Container)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_session_grpc_types_proto_rawDesc, + NumEnums: 2, + NumMessages: 11, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_session_grpc_types_proto_goTypes, + DependencyIndexes: file_session_grpc_types_proto_depIdxs, + EnumInfos: file_session_grpc_types_proto_enumTypes, + MessageInfos: file_session_grpc_types_proto_msgTypes, + }.Build() + File_session_grpc_types_proto = out.File + file_session_grpc_types_proto_rawDesc = nil + file_session_grpc_types_proto_goTypes = nil + file_session_grpc_types_proto_depIdxs = nil +} diff --git a/api/status/codes.go b/api/status/codes.go new file mode 100644 index 000000000..1005aa3ea --- /dev/null +++ b/api/status/codes.go @@ -0,0 +1,26 @@ +package status + +// Supported status codes. +const ( + OK = 0 + InternalServerError = 1024 + WrongNetMagic = 1025 + SignatureVerificationFail = 1026 + NodeUnderMaintenance = 1027 + ObjectAccessDenied = 2048 + ObjectNotFound = 2049 + ObjectLocked = 2050 + LockIrregularObject = 2051 + ObjectAlreadyRemoved = 2052 + OutOfRange = 2053 + ContainerNotFound = 3072 + EACLNotFound = 3073 + SessionTokenNotFound = 4096 + SessionTokenExpired = 4097 +) + +// Supported status details. 
+const ( + DetailCorrectNetMagic = 0 + DetailObjectAccessDenialReason = 0 +) diff --git a/api/status/encoding.go b/api/status/encoding.go new file mode 100644 index 000000000..756c2d692 --- /dev/null +++ b/api/status/encoding.go @@ -0,0 +1,56 @@ +package status + +import ( + "github.com/nspcc-dev/neofs-sdk-go/internal/proto" +) + +const ( + _ = iota + fieldDetailID + fieldDetailValue +) + +func (x *Status_Detail) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeVarint(fieldDetailID, x.Id) + + proto.SizeBytes(fieldDetailValue, x.Value) + } + return sz +} + +func (x *Status_Detail) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalVarint(b, fieldDetailID, x.Id) + proto.MarshalBytes(b[off:], fieldDetailValue, x.Value) + } +} + +const ( + _ = iota + fieldStatusCode + fieldStatusMessage + fieldStatusDetails +) + +func (x *Status) MarshaledSize() int { + var sz int + if x != nil { + sz = proto.SizeVarint(fieldStatusCode, x.Code) + + proto.SizeBytes(fieldStatusMessage, x.Message) + for i := range x.Details { + sz += proto.SizeNested(fieldStatusDetails, x.Details[i]) + } + } + return sz +} + +func (x *Status) MarshalStable(b []byte) { + if x != nil { + off := proto.MarshalVarint(b, fieldStatusCode, x.Code) + off += proto.MarshalBytes(b[off:], fieldStatusMessage, x.Message) + for i := range x.Details { + off += proto.MarshalNested(b[off:], fieldStatusDetails, x.Details[i]) + } + } +} diff --git a/api/status/encoding_test.go b/api/status/encoding_test.go new file mode 100644 index 000000000..87eece88a --- /dev/null +++ b/api/status/encoding_test.go @@ -0,0 +1,32 @@ +package status_test + +import ( + "testing" + + "github.com/nspcc-dev/neofs-sdk-go/api/status" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" +) + +func TestStatus(t *testing.T) { + v := &status.Status{ + Code: 1, + Message: "any_message", + Details: []*status.Status_Detail{ + {Id: 2, Value: []byte("any_detail1")}, + {Id: 3, Value: 
[]byte("any_detail2")}, + }, + } + + sz := v.MarshaledSize() + b := make([]byte, sz) + v.MarshalStable(b) + + var res status.Status + err := proto.Unmarshal(b, &res) + require.NoError(t, err) + require.Empty(t, res.ProtoReflect().GetUnknown()) + require.Equal(t, v.Code, res.Code) + require.Equal(t, v.Message, res.Message) + require.Equal(t, v.Details, res.Details) +} diff --git a/api/status/types.pb.go b/api/status/types.pb.go new file mode 100644 index 000000000..9e8e06b9b --- /dev/null +++ b/api/status/types.pb.go @@ -0,0 +1,654 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.32.0 +// protoc v4.25.1 +// source: status/grpc/types.proto + +package status + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Section identifiers. +type Section int32 + +const ( + // Successful return codes. + Section_SECTION_SUCCESS Section = 0 + // Failure codes regardless of the operation. + Section_SECTION_FAILURE_COMMON Section = 1 + // Object service-specific errors. + Section_SECTION_OBJECT Section = 2 + // Container service-specific errors. + Section_SECTION_CONTAINER Section = 3 + // Session service-specific errors. + Section_SECTION_SESSION Section = 4 +) + +// Enum value maps for Section. 
+var ( + Section_name = map[int32]string{ + 0: "SECTION_SUCCESS", + 1: "SECTION_FAILURE_COMMON", + 2: "SECTION_OBJECT", + 3: "SECTION_CONTAINER", + 4: "SECTION_SESSION", + } + Section_value = map[string]int32{ + "SECTION_SUCCESS": 0, + "SECTION_FAILURE_COMMON": 1, + "SECTION_OBJECT": 2, + "SECTION_CONTAINER": 3, + "SECTION_SESSION": 4, + } +) + +func (x Section) Enum() *Section { + p := new(Section) + *p = x + return p +} + +func (x Section) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Section) Descriptor() protoreflect.EnumDescriptor { + return file_status_grpc_types_proto_enumTypes[0].Descriptor() +} + +func (Section) Type() protoreflect.EnumType { + return &file_status_grpc_types_proto_enumTypes[0] +} + +func (x Section) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Section.Descriptor instead. +func (Section) EnumDescriptor() ([]byte, []int) { + return file_status_grpc_types_proto_rawDescGZIP(), []int{0} +} + +// Section of NeoFS successful return codes. +type Success int32 + +const ( + // [**0**] Default success. Not detailed. + // If the server cannot match successful outcome to the code, it should + // use this code. + Success_OK Success = 0 +) + +// Enum value maps for Success. 
+var ( + Success_name = map[int32]string{ + 0: "OK", + } + Success_value = map[string]int32{ + "OK": 0, + } +) + +func (x Success) Enum() *Success { + p := new(Success) + *p = x + return p +} + +func (x Success) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Success) Descriptor() protoreflect.EnumDescriptor { + return file_status_grpc_types_proto_enumTypes[1].Descriptor() +} + +func (Success) Type() protoreflect.EnumType { + return &file_status_grpc_types_proto_enumTypes[1] +} + +func (x Success) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Success.Descriptor instead. +func (Success) EnumDescriptor() ([]byte, []int) { + return file_status_grpc_types_proto_rawDescGZIP(), []int{1} +} + +// Section of failed statuses independent of the operation. +type CommonFail int32 + +const ( + // [**1024**] Internal server error, default failure. Not detailed. + // If the server cannot match failed outcome to the code, it should + // use this code. + CommonFail_INTERNAL CommonFail = 0 + // [**1025**] Wrong magic of the NeoFS network. + // Details: + // - [**0**] Magic number of the served NeoFS network (big-endian 64-bit + // unsigned integer). + CommonFail_WRONG_MAGIC_NUMBER CommonFail = 1 + // [**1026**] Signature verification failure. + CommonFail_SIGNATURE_VERIFICATION_FAIL CommonFail = 2 + // [**1027**] Node is under maintenance. + CommonFail_NODE_UNDER_MAINTENANCE CommonFail = 3 +) + +// Enum value maps for CommonFail. 
+var ( + CommonFail_name = map[int32]string{ + 0: "INTERNAL", + 1: "WRONG_MAGIC_NUMBER", + 2: "SIGNATURE_VERIFICATION_FAIL", + 3: "NODE_UNDER_MAINTENANCE", + } + CommonFail_value = map[string]int32{ + "INTERNAL": 0, + "WRONG_MAGIC_NUMBER": 1, + "SIGNATURE_VERIFICATION_FAIL": 2, + "NODE_UNDER_MAINTENANCE": 3, + } +) + +func (x CommonFail) Enum() *CommonFail { + p := new(CommonFail) + *p = x + return p +} + +func (x CommonFail) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CommonFail) Descriptor() protoreflect.EnumDescriptor { + return file_status_grpc_types_proto_enumTypes[2].Descriptor() +} + +func (CommonFail) Type() protoreflect.EnumType { + return &file_status_grpc_types_proto_enumTypes[2] +} + +func (x CommonFail) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CommonFail.Descriptor instead. +func (CommonFail) EnumDescriptor() ([]byte, []int) { + return file_status_grpc_types_proto_rawDescGZIP(), []int{2} +} + +// Section of statuses for object-related operations. +type Object int32 + +const ( + // [**2048**] Access denied by ACL. + // Details: + // - [**0**] Human-readable description (UTF-8 encoded string). + Object_ACCESS_DENIED Object = 0 + // [**2049**] Object not found. + Object_OBJECT_NOT_FOUND Object = 1 + // [**2050**] Operation rejected by the object lock. + Object_LOCKED Object = 2 + // [**2051**] Locking an object with a non-REGULAR type rejected. + Object_LOCK_NON_REGULAR_OBJECT Object = 3 + // [**2052**] Object has been marked deleted. + Object_OBJECT_ALREADY_REMOVED Object = 4 + // [**2053**] Invalid range has been requested for an object. + Object_OUT_OF_RANGE Object = 5 +) + +// Enum value maps for Object. 
+var ( + Object_name = map[int32]string{ + 0: "ACCESS_DENIED", + 1: "OBJECT_NOT_FOUND", + 2: "LOCKED", + 3: "LOCK_NON_REGULAR_OBJECT", + 4: "OBJECT_ALREADY_REMOVED", + 5: "OUT_OF_RANGE", + } + Object_value = map[string]int32{ + "ACCESS_DENIED": 0, + "OBJECT_NOT_FOUND": 1, + "LOCKED": 2, + "LOCK_NON_REGULAR_OBJECT": 3, + "OBJECT_ALREADY_REMOVED": 4, + "OUT_OF_RANGE": 5, + } +) + +func (x Object) Enum() *Object { + p := new(Object) + *p = x + return p +} + +func (x Object) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Object) Descriptor() protoreflect.EnumDescriptor { + return file_status_grpc_types_proto_enumTypes[3].Descriptor() +} + +func (Object) Type() protoreflect.EnumType { + return &file_status_grpc_types_proto_enumTypes[3] +} + +func (x Object) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Object.Descriptor instead. +func (Object) EnumDescriptor() ([]byte, []int) { + return file_status_grpc_types_proto_rawDescGZIP(), []int{3} +} + +// Section of statuses for container-related operations. +type Container int32 + +const ( + // [**3072**] Container not found. + Container_CONTAINER_NOT_FOUND Container = 0 + // [**3073**] eACL table not found. + Container_EACL_NOT_FOUND Container = 1 +) + +// Enum value maps for Container. 
+var ( + Container_name = map[int32]string{ + 0: "CONTAINER_NOT_FOUND", + 1: "EACL_NOT_FOUND", + } + Container_value = map[string]int32{ + "CONTAINER_NOT_FOUND": 0, + "EACL_NOT_FOUND": 1, + } +) + +func (x Container) Enum() *Container { + p := new(Container) + *p = x + return p +} + +func (x Container) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Container) Descriptor() protoreflect.EnumDescriptor { + return file_status_grpc_types_proto_enumTypes[4].Descriptor() +} + +func (Container) Type() protoreflect.EnumType { + return &file_status_grpc_types_proto_enumTypes[4] +} + +func (x Container) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Container.Descriptor instead. +func (Container) EnumDescriptor() ([]byte, []int) { + return file_status_grpc_types_proto_rawDescGZIP(), []int{4} +} + +// Section of statuses for session-related operations. +type Session int32 + +const ( + // [**4096**] Token not found. + Session_TOKEN_NOT_FOUND Session = 0 + // [**4097**] Token has expired. + Session_TOKEN_EXPIRED Session = 1 +) + +// Enum value maps for Session. +var ( + Session_name = map[int32]string{ + 0: "TOKEN_NOT_FOUND", + 1: "TOKEN_EXPIRED", + } + Session_value = map[string]int32{ + "TOKEN_NOT_FOUND": 0, + "TOKEN_EXPIRED": 1, + } +) + +func (x Session) Enum() *Session { + p := new(Session) + *p = x + return p +} + +func (x Session) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Session) Descriptor() protoreflect.EnumDescriptor { + return file_status_grpc_types_proto_enumTypes[5].Descriptor() +} + +func (Session) Type() protoreflect.EnumType { + return &file_status_grpc_types_proto_enumTypes[5] +} + +func (x Session) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Session.Descriptor instead. 
+func (Session) EnumDescriptor() ([]byte, []int) { + return file_status_grpc_types_proto_rawDescGZIP(), []int{5} +} + +// Declares the general format of the status returns of the NeoFS RPC protocol. +// Status is present in all response messages. Each RPC of NeoFS protocol +// describes the possible outcomes and details of the operation. +// +// Each status is assigned a one-to-one numeric code. Any unique result of an +// operation in NeoFS is unambiguously associated with the code value. +// +// Numerical set of codes is split into 1024-element sections. An enumeration +// is defined for each section. Values can be referred to in the following ways: +// +// * numerical value ranging from 0 to 4,294,967,295 (global code); +// +// - values from enumeration (local code). The formula for the ratio of the +// local code (`L`) of a defined section (`S`) to the global one (`G`): +// `G = 1024 * S + L`. +// +// All outcomes are divided into successful and failed, which corresponds +// to the success or failure of the operation. The definition of success +// follows the semantics of RPC and the description of its purpose. +// The server must not attach code that is the opposite of the outcome type. +// +// See the set of return codes in the description for calls. +// +// Each status can carry a developer-facing error message. It should be a human +// readable text in English. The server should not transmit (and the client +// should not expect) useful information in the message. Field `details` +// should make the return more detailed. +type Status struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The status code + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // Developer-facing error message + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // Data detailing the outcome of the operation. Must be unique by ID. 
+ Details []*Status_Detail `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` +} + +func (x *Status) Reset() { + *x = Status{} + if protoimpl.UnsafeEnabled { + mi := &file_status_grpc_types_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Status) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Status) ProtoMessage() {} + +func (x *Status) ProtoReflect() protoreflect.Message { + mi := &file_status_grpc_types_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Status.ProtoReflect.Descriptor instead. +func (*Status) Descriptor() ([]byte, []int) { + return file_status_grpc_types_proto_rawDescGZIP(), []int{0} +} + +func (x *Status) GetCode() uint32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *Status) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *Status) GetDetails() []*Status_Detail { + if x != nil { + return x.Details + } + return nil +} + +// Return detail. It contains additional information that can be used to +// analyze the response. Each code defines a set of details that can be +// attached to a status. Client should not handle details that are not +// covered by the code. +type Status_Detail struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Detail ID. The identifier is required to determine the binary format + // of the detail and how to decode it. + Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + // Binary status detail. Must follow the format associated with ID. + // The possibility of missing a value must be explicitly allowed. 
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *Status_Detail) Reset() { + *x = Status_Detail{} + if protoimpl.UnsafeEnabled { + mi := &file_status_grpc_types_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Status_Detail) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Status_Detail) ProtoMessage() {} + +func (x *Status_Detail) ProtoReflect() protoreflect.Message { + mi := &file_status_grpc_types_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Status_Detail.ProtoReflect.Descriptor instead. +func (*Status_Detail) Descriptor() ([]byte, []int) { + return file_status_grpc_types_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Status_Detail) GetId() uint32 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *Status_Detail) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +var File_status_grpc_types_proto protoreflect.FileDescriptor + +var file_status_grpc_types_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x6e, 0x65, 0x6f, 0x2e, 0x66, + 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xa1, 0x01, 0x0a, 0x06, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x12, 0x39, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 
0x1f, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, + 0x32, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, + 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x1a, + 0x2e, 0x0a, 0x06, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2a, + 0x7a, 0x0a, 0x07, 0x53, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x45, + 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, + 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x55, + 0x52, 0x45, 0x5f, 0x43, 0x4f, 0x4d, 0x4d, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x53, + 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x10, 0x02, 0x12, + 0x15, 0x0a, 0x11, 0x53, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x41, + 0x49, 0x4e, 0x45, 0x52, 0x10, 0x03, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x45, 0x43, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x53, 0x45, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x10, 0x04, 0x2a, 0x11, 0x0a, 0x07, 0x53, + 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x06, 0x0a, 0x02, 0x4f, 0x4b, 0x10, 0x00, 0x2a, 0x6f, + 0x0a, 0x0a, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x12, 0x0c, 0x0a, 0x08, + 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x57, 0x52, + 0x4f, 0x4e, 0x47, 0x5f, 0x4d, 0x41, 0x47, 0x49, 0x43, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, + 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x53, 0x49, 0x47, 0x4e, 0x41, 0x54, 0x55, 0x52, 0x45, 0x5f, + 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x41, 0x49, + 0x4c, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, 0x4e, 0x4f, 0x44, 0x45, 0x5f, 
0x55, 0x4e, 0x44, 0x45, + 0x52, 0x5f, 0x4d, 0x41, 0x49, 0x4e, 0x54, 0x45, 0x4e, 0x41, 0x4e, 0x43, 0x45, 0x10, 0x03, 0x2a, + 0x88, 0x01, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x11, 0x0a, 0x0d, 0x41, 0x43, + 0x43, 0x45, 0x53, 0x53, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, + 0x10, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, + 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x4c, 0x4f, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x02, 0x12, + 0x1b, 0x0a, 0x17, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x4e, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x47, 0x55, + 0x4c, 0x41, 0x52, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x10, 0x03, 0x12, 0x1a, 0x0a, 0x16, + 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x41, 0x4c, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x52, + 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x44, 0x10, 0x04, 0x12, 0x10, 0x0a, 0x0c, 0x4f, 0x55, 0x54, 0x5f, + 0x4f, 0x46, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x05, 0x2a, 0x38, 0x0a, 0x09, 0x43, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, 0x17, 0x0a, 0x13, 0x43, 0x4f, 0x4e, 0x54, 0x41, + 0x49, 0x4e, 0x45, 0x52, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x00, + 0x12, 0x12, 0x0a, 0x0e, 0x45, 0x41, 0x43, 0x4c, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, + 0x4e, 0x44, 0x10, 0x01, 0x2a, 0x31, 0x0a, 0x07, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x13, 0x0a, 0x0f, 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, + 0x4e, 0x44, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x5f, 0x45, 0x58, + 0x50, 0x49, 0x52, 0x45, 0x44, 0x10, 0x01, 0x42, 0x56, 0x5a, 0x37, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6e, 0x73, 0x70, 0x63, 0x63, 0x2d, 0x64, 0x65, 0x76, 0x2f, + 0x6e, 0x65, 0x6f, 0x66, 0x73, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x3b, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 
0xaa, 0x02, 0x1a, 0x4e, 0x65, 0x6f, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x41, 0x50, 0x49, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_status_grpc_types_proto_rawDescOnce sync.Once + file_status_grpc_types_proto_rawDescData = file_status_grpc_types_proto_rawDesc +) + +func file_status_grpc_types_proto_rawDescGZIP() []byte { + file_status_grpc_types_proto_rawDescOnce.Do(func() { + file_status_grpc_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_status_grpc_types_proto_rawDescData) + }) + return file_status_grpc_types_proto_rawDescData +} + +var file_status_grpc_types_proto_enumTypes = make([]protoimpl.EnumInfo, 6) +var file_status_grpc_types_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_status_grpc_types_proto_goTypes = []interface{}{ + (Section)(0), // 0: neo.fs.v2.status.Section + (Success)(0), // 1: neo.fs.v2.status.Success + (CommonFail)(0), // 2: neo.fs.v2.status.CommonFail + (Object)(0), // 3: neo.fs.v2.status.Object + (Container)(0), // 4: neo.fs.v2.status.Container + (Session)(0), // 5: neo.fs.v2.status.Session + (*Status)(nil), // 6: neo.fs.v2.status.Status + (*Status_Detail)(nil), // 7: neo.fs.v2.status.Status.Detail +} +var file_status_grpc_types_proto_depIdxs = []int32{ + 7, // 0: neo.fs.v2.status.Status.details:type_name -> neo.fs.v2.status.Status.Detail + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_status_grpc_types_proto_init() } +func file_status_grpc_types_proto_init() { + if File_status_grpc_types_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_status_grpc_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Status); i { + 
case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_status_grpc_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Status_Detail); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_status_grpc_types_proto_rawDesc, + NumEnums: 6, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_status_grpc_types_proto_goTypes, + DependencyIndexes: file_status_grpc_types_proto_depIdxs, + EnumInfos: file_status_grpc_types_proto_enumTypes, + MessageInfos: file_status_grpc_types_proto_msgTypes, + }.Build() + File_status_grpc_types_proto = out.File + file_status_grpc_types_proto_rawDesc = nil + file_status_grpc_types_proto_goTypes = nil + file_status_grpc_types_proto_depIdxs = nil +} diff --git a/api/storagegroup/types.pb.go b/api/storagegroup/types.pb.go new file mode 100644 index 000000000..35f52d405 --- /dev/null +++ b/api/storagegroup/types.pb.go @@ -0,0 +1,211 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.32.0 +// protoc v4.25.1 +// source: storagegroup/grpc/types.proto + +package storagegroup + +import ( + reflect "reflect" + sync "sync" + + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// StorageGroup keeps verification information for Data Audit sessions. 
Objects +// that require paid storage guarantees are gathered in `StorageGroups` with +// additional information used for the proof of storage. `StorageGroup` only +// contains objects from the same container. +// +// Being an object payload, StorageGroup may have expiration Epoch set with +// `__NEOFS__EXPIRATION_EPOCH` well-known attribute. When expired, StorageGroup +// will be ignored by InnerRing nodes during Data Audit cycles and will be +// deleted by Storage Nodes. +type StorageGroup struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Total size of the payloads of objects in the storage group + ValidationDataSize uint64 `protobuf:"varint,1,opt,name=validation_data_size,json=validationDataSize,proto3" json:"validation_data_size,omitempty"` + // Homomorphic hash from the concatenation of the payloads of the storage + // group members. The order of concatenation is the same as the order of the + // members in the `members` field. + ValidationHash *refs.Checksum `protobuf:"bytes,2,opt,name=validation_hash,json=validationHash,proto3" json:"validation_hash,omitempty"` + // DEPRECATED. Last NeoFS epoch number of the storage group lifetime + // + // Deprecated: Marked as deprecated in storagegroup/grpc/types.proto. + ExpirationEpoch uint64 `protobuf:"varint,3,opt,name=expiration_epoch,json=expirationEpoch,proto3" json:"expiration_epoch,omitempty"` + // Strictly ordered list of storage group member objects. 
Members MUST be unique + Members []*refs.ObjectID `protobuf:"bytes,4,rep,name=members,proto3" json:"members,omitempty"` +} + +func (x *StorageGroup) Reset() { + *x = StorageGroup{} + if protoimpl.UnsafeEnabled { + mi := &file_storagegroup_grpc_types_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StorageGroup) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StorageGroup) ProtoMessage() {} + +func (x *StorageGroup) ProtoReflect() protoreflect.Message { + mi := &file_storagegroup_grpc_types_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StorageGroup.ProtoReflect.Descriptor instead. +func (*StorageGroup) Descriptor() ([]byte, []int) { + return file_storagegroup_grpc_types_proto_rawDescGZIP(), []int{0} +} + +func (x *StorageGroup) GetValidationDataSize() uint64 { + if x != nil { + return x.ValidationDataSize + } + return 0 +} + +func (x *StorageGroup) GetValidationHash() *refs.Checksum { + if x != nil { + return x.ValidationHash + } + return nil +} + +// Deprecated: Marked as deprecated in storagegroup/grpc/types.proto. 
+func (x *StorageGroup) GetExpirationEpoch() uint64 { + if x != nil { + return x.ExpirationEpoch + } + return 0 +} + +func (x *StorageGroup) GetMembers() []*refs.ObjectID { + if x != nil { + return x.Members + } + return nil +} + +var File_storagegroup_grpc_types_proto protoreflect.FileDescriptor + +var file_storagegroup_grpc_types_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x2f, 0x67, + 0x72, 0x70, 0x63, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x16, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x1a, 0x15, 0x72, 0x65, 0x66, 0x73, 0x2f, 0x67, 0x72, + 0x70, 0x63, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe6, + 0x01, 0x0a, 0x0c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, + 0x30, 0x0a, 0x14, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x61, + 0x74, 0x61, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x12, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x69, 0x7a, + 0x65, 0x12, 0x41, 0x0a, 0x0f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, + 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x75, 0x6d, 0x52, 0x0e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x48, 0x61, 0x73, 0x68, 0x12, 0x2d, 0x0a, 0x10, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x42, 0x02, + 0x18, 0x01, 0x52, 0x0f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x70, + 0x6f, 0x63, 0x68, 0x12, 0x32, 0x0a, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 
0x72, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x44, 0x52, 0x07, + 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x42, 0x68, 0x5a, 0x43, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6e, 0x73, 0x70, 0x63, 0x63, 0x2d, 0x64, 0x65, 0x76, 0x2f, + 0x6e, 0x65, 0x6f, 0x66, 0x73, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x2f, 0x67, 0x72, 0x70, + 0x63, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x67, 0x72, 0x6f, 0x75, 0x70, 0xaa, 0x02, + 0x20, 0x4e, 0x65, 0x6f, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x41, 0x50, 0x49, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x47, 0x72, 0x6f, 0x75, + 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_storagegroup_grpc_types_proto_rawDescOnce sync.Once + file_storagegroup_grpc_types_proto_rawDescData = file_storagegroup_grpc_types_proto_rawDesc +) + +func file_storagegroup_grpc_types_proto_rawDescGZIP() []byte { + file_storagegroup_grpc_types_proto_rawDescOnce.Do(func() { + file_storagegroup_grpc_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_storagegroup_grpc_types_proto_rawDescData) + }) + return file_storagegroup_grpc_types_proto_rawDescData +} + +var file_storagegroup_grpc_types_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_storagegroup_grpc_types_proto_goTypes = []interface{}{ + (*StorageGroup)(nil), // 0: neo.fs.v2.storagegroup.StorageGroup + (*refs.Checksum)(nil), // 1: neo.fs.v2.refs.Checksum + (*refs.ObjectID)(nil), // 2: neo.fs.v2.refs.ObjectID +} +var file_storagegroup_grpc_types_proto_depIdxs = []int32{ + 1, // 0: neo.fs.v2.storagegroup.StorageGroup.validation_hash:type_name -> neo.fs.v2.refs.Checksum + 2, // 1: neo.fs.v2.storagegroup.StorageGroup.members:type_name 
-> neo.fs.v2.refs.ObjectID + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_storagegroup_grpc_types_proto_init() } +func file_storagegroup_grpc_types_proto_init() { + if File_storagegroup_grpc_types_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_storagegroup_grpc_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StorageGroup); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_storagegroup_grpc_types_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_storagegroup_grpc_types_proto_goTypes, + DependencyIndexes: file_storagegroup_grpc_types_proto_depIdxs, + MessageInfos: file_storagegroup_grpc_types_proto_msgTypes, + }.Build() + File_storagegroup_grpc_types_proto = out.File + file_storagegroup_grpc_types_proto_rawDesc = nil + file_storagegroup_grpc_types_proto_goTypes = nil + file_storagegroup_grpc_types_proto_depIdxs = nil +} diff --git a/api/subnet/types.pb.go b/api/subnet/types.pb.go new file mode 100644 index 000000000..6235c107d --- /dev/null +++ b/api/subnet/types.pb.go @@ -0,0 +1,172 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.32.0 +// protoc v4.25.1 +// source: subnet/grpc/types.proto + +package subnet + +import ( + grpc "github.com/nspcc-dev/neofs-api-go/v2/refs/grpc" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// NeoFS subnetwork description +// +// DEPRECATED. Ignored and kept for compatibility only. +type SubnetInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Unique subnet identifier. Missing ID is + // equivalent to zero (default subnetwork) ID. + Id *grpc.SubnetID `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Identifier of the subnetwork owner + Owner *grpc.OwnerID `protobuf:"bytes,2,opt,name=owner,proto3" json:"owner,omitempty"` +} + +func (x *SubnetInfo) Reset() { + *x = SubnetInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_subnet_grpc_types_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubnetInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubnetInfo) ProtoMessage() {} + +func (x *SubnetInfo) ProtoReflect() protoreflect.Message { + mi := &file_subnet_grpc_types_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubnetInfo.ProtoReflect.Descriptor instead. 
+func (*SubnetInfo) Descriptor() ([]byte, []int) { + return file_subnet_grpc_types_proto_rawDescGZIP(), []int{0} +} + +func (x *SubnetInfo) GetId() *grpc.SubnetID { + if x != nil { + return x.Id + } + return nil +} + +func (x *SubnetInfo) GetOwner() *grpc.OwnerID { + if x != nil { + return x.Owner + } + return nil +} + +var File_subnet_grpc_types_proto protoreflect.FileDescriptor + +var file_subnet_grpc_types_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x6e, 0x65, 0x6f, 0x2e, 0x66, + 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x1a, 0x15, 0x72, 0x65, 0x66, + 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0x65, 0x0a, 0x0a, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, + 0x12, 0x28, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6e, + 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x53, 0x75, + 0x62, 0x6e, 0x65, 0x74, 0x49, 0x44, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2d, 0x0a, 0x05, 0x6f, 0x77, + 0x6e, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, + 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x66, 0x73, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, + 0x49, 0x44, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x56, 0x5a, 0x37, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6e, 0x73, 0x70, 0x63, 0x63, 0x2d, 0x64, 0x65, + 0x76, 0x2f, 0x6e, 0x65, 0x6f, 0x66, 0x73, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x67, 0x6f, 0x2f, 0x76, + 0x32, 0x2f, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x3b, 0x73, 0x75, + 0x62, 0x6e, 0x65, 0x74, 0xaa, 0x02, 0x1a, 0x4e, 0x65, 0x6f, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x41, 0x50, 0x49, 0x2e, 0x53, 0x75, 0x62, 0x6e, 
0x65, + 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_subnet_grpc_types_proto_rawDescOnce sync.Once + file_subnet_grpc_types_proto_rawDescData = file_subnet_grpc_types_proto_rawDesc +) + +func file_subnet_grpc_types_proto_rawDescGZIP() []byte { + file_subnet_grpc_types_proto_rawDescOnce.Do(func() { + file_subnet_grpc_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_subnet_grpc_types_proto_rawDescData) + }) + return file_subnet_grpc_types_proto_rawDescData +} + +var file_subnet_grpc_types_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_subnet_grpc_types_proto_goTypes = []interface{}{ + (*SubnetInfo)(nil), // 0: neo.fs.v2.subnet.SubnetInfo + (*grpc.SubnetID)(nil), // 1: neo.fs.v2.refs.SubnetID + (*grpc.OwnerID)(nil), // 2: neo.fs.v2.refs.OwnerID +} +var file_subnet_grpc_types_proto_depIdxs = []int32{ + 1, // 0: neo.fs.v2.subnet.SubnetInfo.id:type_name -> neo.fs.v2.refs.SubnetID + 2, // 1: neo.fs.v2.subnet.SubnetInfo.owner:type_name -> neo.fs.v2.refs.OwnerID + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_subnet_grpc_types_proto_init() } +func file_subnet_grpc_types_proto_init() { + if File_subnet_grpc_types_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_subnet_grpc_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubnetInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_subnet_grpc_types_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + 
GoTypes: file_subnet_grpc_types_proto_goTypes, + DependencyIndexes: file_subnet_grpc_types_proto_depIdxs, + MessageInfos: file_subnet_grpc_types_proto_msgTypes, + }.Build() + File_subnet_grpc_types_proto = out.File + file_subnet_grpc_types_proto_rawDesc = nil + file_subnet_grpc_types_proto_goTypes = nil + file_subnet_grpc_types_proto_depIdxs = nil +} diff --git a/api/tombstone/types.pb.go b/api/tombstone/types.pb.go new file mode 100644 index 000000000..01c73fb8b --- /dev/null +++ b/api/tombstone/types.pb.go @@ -0,0 +1,189 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.32.0 +// protoc v4.25.1 +// source: tombstone/grpc/types.proto + +package tombstone + +import ( + reflect "reflect" + sync "sync" + + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Tombstone keeps record of deleted objects for a few epochs until they are +// purged from the NeoFS network. +type Tombstone struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Last NeoFS epoch number of the tombstone lifetime. It's set by the tombstone + // creator depending on the current NeoFS network settings. + // DEPRECATED. Field ignored by servers, set corresponding object attribute + // `__NEOFS__EXPIRATION_EPOCH` only. + // + // Deprecated: Marked as deprecated in tombstone/grpc/types.proto. + ExpirationEpoch uint64 `protobuf:"varint,1,opt,name=expiration_epoch,json=expirationEpoch,proto3" json:"expiration_epoch,omitempty"` + // 16 byte UUID used to identify the split object hierarchy parts. 
Must be + // unique inside a container. All objects participating in the split must + // have the same `split_id` value. + SplitId []byte `protobuf:"bytes,2,opt,name=split_id,json=splitID,proto3" json:"split_id,omitempty"` + // List of objects to be deleted. + Members []*refs.ObjectID `protobuf:"bytes,3,rep,name=members,proto3" json:"members,omitempty"` +} + +func (x *Tombstone) Reset() { + *x = Tombstone{} + if protoimpl.UnsafeEnabled { + mi := &file_tombstone_grpc_types_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Tombstone) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Tombstone) ProtoMessage() {} + +func (x *Tombstone) ProtoReflect() protoreflect.Message { + mi := &file_tombstone_grpc_types_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Tombstone.ProtoReflect.Descriptor instead. +func (*Tombstone) Descriptor() ([]byte, []int) { + return file_tombstone_grpc_types_proto_rawDescGZIP(), []int{0} +} + +// Deprecated: Marked as deprecated in tombstone/grpc/types.proto. 
+func (x *Tombstone) GetExpirationEpoch() uint64 { + if x != nil { + return x.ExpirationEpoch + } + return 0 +} + +func (x *Tombstone) GetSplitId() []byte { + if x != nil { + return x.SplitId + } + return nil +} + +func (x *Tombstone) GetMembers() []*refs.ObjectID { + if x != nil { + return x.Members + } + return nil +} + +var File_tombstone_grpc_types_proto protoreflect.FileDescriptor + +var file_tombstone_grpc_types_proto_rawDesc = []byte{ + 0x0a, 0x1a, 0x74, 0x6f, 0x6d, 0x62, 0x73, 0x74, 0x6f, 0x6e, 0x65, 0x2f, 0x67, 0x72, 0x70, 0x63, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x6e, 0x65, + 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x74, 0x6f, 0x6d, 0x62, 0x73, 0x74, 0x6f, 0x6e, + 0x65, 0x1a, 0x15, 0x72, 0x65, 0x66, 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x89, 0x01, 0x0a, 0x09, 0x54, 0x6f, 0x6d, + 0x62, 0x73, 0x74, 0x6f, 0x6e, 0x65, 0x12, 0x2d, 0x0a, 0x10, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, + 0x42, 0x02, 0x18, 0x01, 0x52, 0x0f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x45, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x49, 0x44, + 0x12, 0x32, 0x0a, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x18, 0x2e, 0x6e, 0x65, 0x6f, 0x2e, 0x66, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, + 0x66, 0x73, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x44, 0x52, 0x07, 0x6d, 0x65, 0x6d, + 0x62, 0x65, 0x72, 0x73, 0x42, 0x5f, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x6e, 0x73, 0x70, 0x63, 0x63, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x6e, 0x65, 0x6f, + 0x66, 0x73, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 
0x74, 0x6f, 0x6d, + 0x62, 0x73, 0x74, 0x6f, 0x6e, 0x65, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x3b, 0x74, 0x6f, 0x6d, 0x62, + 0x73, 0x74, 0x6f, 0x6e, 0x65, 0xaa, 0x02, 0x1d, 0x4e, 0x65, 0x6f, 0x2e, 0x46, 0x69, 0x6c, 0x65, + 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x41, 0x50, 0x49, 0x2e, 0x54, 0x6f, 0x6d, 0x62, + 0x73, 0x74, 0x6f, 0x6e, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_tombstone_grpc_types_proto_rawDescOnce sync.Once + file_tombstone_grpc_types_proto_rawDescData = file_tombstone_grpc_types_proto_rawDesc +) + +func file_tombstone_grpc_types_proto_rawDescGZIP() []byte { + file_tombstone_grpc_types_proto_rawDescOnce.Do(func() { + file_tombstone_grpc_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_tombstone_grpc_types_proto_rawDescData) + }) + return file_tombstone_grpc_types_proto_rawDescData +} + +var file_tombstone_grpc_types_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_tombstone_grpc_types_proto_goTypes = []interface{}{ + (*Tombstone)(nil), // 0: neo.fs.v2.tombstone.Tombstone + (*refs.ObjectID)(nil), // 1: neo.fs.v2.refs.ObjectID +} +var file_tombstone_grpc_types_proto_depIdxs = []int32{ + 1, // 0: neo.fs.v2.tombstone.Tombstone.members:type_name -> neo.fs.v2.refs.ObjectID + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_tombstone_grpc_types_proto_init() } +func file_tombstone_grpc_types_proto_init() { + if File_tombstone_grpc_types_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_tombstone_grpc_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Tombstone); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + 
out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_tombstone_grpc_types_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_tombstone_grpc_types_proto_goTypes, + DependencyIndexes: file_tombstone_grpc_types_proto_depIdxs, + MessageInfos: file_tombstone_grpc_types_proto_msgTypes, + }.Build() + File_tombstone_grpc_types_proto = out.File + file_tombstone_grpc_types_proto_rawDesc = nil + file_tombstone_grpc_types_proto_goTypes = nil + file_tombstone_grpc_types_proto_depIdxs = nil +} diff --git a/audit/collect.go b/audit/collect.go index f9d38adca..d9a31b97e 100644 --- a/audit/collect.go +++ b/audit/collect.go @@ -1,81 +1,68 @@ package audit -import ( - "context" - "fmt" - - "github.com/nspcc-dev/neofs-sdk-go/checksum" - "github.com/nspcc-dev/neofs-sdk-go/client" - cid "github.com/nspcc-dev/neofs-sdk-go/container/id" - oid "github.com/nspcc-dev/neofs-sdk-go/object/id" - "github.com/nspcc-dev/neofs-sdk-go/object/relations" - "github.com/nspcc-dev/neofs-sdk-go/storagegroup" - "github.com/nspcc-dev/neofs-sdk-go/user" - "github.com/nspcc-dev/tzhash/tz" -) - -// CollectMembers creates new storage group structure and fills it -// with information about members collected via HeadReceiver. // -// Resulting storage group consists of physically stored objects only. 
-func CollectMembers( - ctx context.Context, - collector relations.Executor, - cnr cid.ID, - members []oid.ID, - tokens relations.Tokens, - calcHomoHash bool, - signer user.Signer, -) (*storagegroup.StorageGroup, error) { - var ( - err error - sumPhySize uint64 - phyMembers []oid.ID - phyHashes [][]byte - addr oid.Address - sg storagegroup.StorageGroup - ) - - addr.SetContainer(cnr) - - for i := range members { - if phyMembers, _, err = relations.Get(ctx, collector, cnr, members[i], tokens, signer); err != nil { - return nil, err - } - - var prmHead client.PrmObjectHead - for _, phyMember := range phyMembers { - addr.SetObject(phyMember) - hdr, err := collector.ObjectHead(ctx, addr.Container(), addr.Object(), signer, prmHead) - if err != nil { - return nil, fmt.Errorf("head phy member '%s': %w", phyMember.EncodeToString(), err) - } - - sumPhySize += hdr.PayloadSize() - cs, _ := hdr.PayloadHomomorphicHash() - - if calcHomoHash { - phyHashes = append(phyHashes, cs.Value()) - } - } - } - - sg.SetMembers(phyMembers) - sg.SetValidationDataSize(sumPhySize) - - if calcHomoHash { - sumHash, err := tz.Concat(phyHashes) - if err != nil { - return nil, err - } - - var cs checksum.Checksum - tzHash := [64]byte{} - copy(tzHash[:], sumHash) - cs.SetTillichZemor(tzHash) - - sg.SetValidationDataHash(cs) - } - - return &sg, nil -} +// // CollectMembers creates new storage group structure and fills it +// // with information about members collected via HeadReceiver. +// // +// // Resulting storage group consists of physically stored objects only. 
+// func CollectMembers( +// ctx context.Context, +// collector relations.Executor, +// cnr cid.ID, +// members []oid.ID, +// tokens relations.Tokens, +// calcHomoHash bool, +// signer user.Signer, +// ) (*storagegroup.StorageGroup, error) { +// var ( +// err error +// sumPhySize uint64 +// phyMembers []oid.ID +// phyHashes [][]byte +// addr oid.Address +// sg storagegroup.StorageGroup +// ) +// +// addr.SetContainer(cnr) +// +// for i := range members { +// if phyMembers, _, err = relations.Get(ctx, collector, cnr, members[i], tokens, signer); err != nil { +// return nil, err +// } +// +// var prmHead client.PrmObjectHead +// for _, phyMember := range phyMembers { +// addr.SetObject(phyMember) +// hdr, err := collector.ObjectHead(ctx, addr.Container(), addr.Object(), signer, prmHead) +// if err != nil { +// return nil, fmt.Errorf("head phy member '%s': %w", phyMember.EncodeToString(), err) +// } +// +// sumPhySize += hdr.PayloadSize() +// cs, _ := hdr.PayloadHomomorphicHash() +// +// if calcHomoHash { +// phyHashes = append(phyHashes, cs.Value()) +// } +// } +// } +// +// sg.SetMembers(phyMembers) +// sg.SetValidationDataSize(sumPhySize) +// +// if calcHomoHash { +// sumHash, err := tz.Concat(phyHashes) +// if err != nil { +// return nil, err +// } +// +// var cs checksum.Checksum +// tzHash := [64]byte{} +// copy(tzHash[:], sumHash) +// cs.SetTillichZemor(tzHash) +// +// sg.SetValidationDataHash(cs) +// } +// +// return &sg, nil +// } diff --git a/audit/example_test.go b/audit/example_test.go index abd7ca1b5..ad733d8c5 100644 --- a/audit/example_test.go +++ b/audit/example_test.go @@ -12,7 +12,7 @@ func ExampleResult() { res.ForEpoch(32) res.ForContainer(cnr) // ... - res.Complete() + res.SetCompleted(true) // Result instances can be stored in a binary format on client side. 
data := res.Marshal() diff --git a/audit/result.go b/audit/result.go index d733de27d..1d6a92c8a 100644 --- a/audit/result.go +++ b/audit/result.go @@ -4,90 +4,164 @@ import ( "errors" "fmt" - "github.com/nspcc-dev/neofs-api-go/v2/audit" - "github.com/nspcc-dev/neofs-api-go/v2/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/audit" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" cid "github.com/nspcc-dev/neofs-sdk-go/container/id" oid "github.com/nspcc-dev/neofs-sdk-go/object/id" "github.com/nspcc-dev/neofs-sdk-go/version" + "google.golang.org/protobuf/proto" ) // Result represents report on the results of the data audit in NeoFS system. // -// Result is mutually binary-compatible with github.com/nspcc-dev/neofs-api-go/v2/audit.DataAuditResult -// message. See Marshal / Unmarshal methods. -// // Instances can be created using built-in var declaration. type Result struct { - versionEncoded bool + decoded bool + + versionSet bool + version version.Version + + auditEpoch uint64 + auditorPubKey []byte + + cnrSet bool + cnr cid.ID + + completed bool + + requestsPoR, retriesPoR uint32 + + hits, misses, fails uint32 - v2 audit.DataAuditResult + passSG, failSG []oid.ID + + passNodes, failNodes [][]byte } -// Marshal encodes Result into a canonical NeoFS binary format (Protocol Buffers -// with direct field order). -// -// Writes version.Current() protocol version into the resulting message if Result -// hasn't been already decoded from such a message using Unmarshal. -// -// See also Unmarshal. -func (r *Result) Marshal() []byte { - if !r.versionEncoded { - var verV2 refs.Version - version.Current().WriteToV2(&verV2) - r.v2.SetVersion(&verV2) - r.versionEncoded = true +// Marshal encodes Result into a Protocol Buffers V3 binary format. +// +// Writes current protocol version into the resulting message if Result hasn't +// been already decoded from such a message. +// +// See also [Result.Unmarshal]. 
+func (r Result) Marshal() []byte { + m := &audit.DataAuditResult{ + AuditEpoch: r.auditEpoch, + PublicKey: r.auditorPubKey, + Complete: r.completed, + Requests: r.requestsPoR, + Retries: r.retriesPoR, + Hit: r.hits, + Miss: r.misses, + Fail: r.fails, + PassNodes: r.passNodes, + FailNodes: r.failNodes, + } + if r.versionSet { + m.Version = new(refs.Version) + r.version.WriteToV2(m.Version) + } else if !r.decoded { + m.Version = new(refs.Version) + version.Current().WriteToV2(m.Version) + } + if r.cnrSet { + m.ContainerId = new(refs.ContainerID) + r.cnr.WriteToV2(m.ContainerId) + } + if r.passSG != nil { + m.PassSg = make([]*refs.ObjectID, len(r.passSG)) + for i := range r.passSG { + m.PassSg[i] = new(refs.ObjectID) + r.passSG[i].WriteToV2(m.PassSg[i]) + } + } + if r.failSG != nil { + m.FailSg = make([]*refs.ObjectID, len(r.failSG)) + for i := range r.failSG { + m.FailSg[i] = new(refs.ObjectID) + r.failSG[i].WriteToV2(m.FailSg[i]) + } } - return r.v2.StableMarshal(nil) + b, err := proto.Marshal(m) + if err != nil { + // while it is bad to panic on external package return, we can do nothing better + // for this case: how can a normal message not be encoded? + panic(fmt.Errorf("unexpected marshal protobuf message failure: %w", err)) + } + return b } var errCIDNotSet = errors.New("container ID is not set") -// Unmarshal decodes Result from its canonical NeoFS binary format (Protocol Buffers -// with direct field order). Returns an error describing a format violation. +// Unmarshal decodes Protocol Buffers V3 binary data into the Result. Returns an +// error describing a format violation of the specified fields. Unmarshal does +// not check presence of the required fields and, at the same time, checks +// format of presented fields. // -// See also Marshal. +// See also [Result.Marshal]. 
func (r *Result) Unmarshal(data []byte) error { - err := r.v2.Unmarshal(data) + var m audit.DataAuditResult + err := proto.Unmarshal(data, &m) if err != nil { - return err + return fmt.Errorf("decode protobuf: %w", err) } - r.versionEncoded = true - // format checks - - var cID cid.ID - - cidV2 := r.v2.GetContainerID() - if cidV2 == nil { + r.cnrSet = m.ContainerId != nil + if !r.cnrSet { return errCIDNotSet } - err = cID.ReadFromV2(*cidV2) + err = r.cnr.ReadFromV2(m.ContainerId) if err != nil { - return fmt.Errorf("could not convert V2 container ID: %w", err) + return fmt.Errorf("invalid container ID: %w", err) } - var ( - oID oid.ID - oidV2 refs.ObjectID - ) - - for _, oidV2 = range r.v2.GetPassSG() { - err = oID.ReadFromV2(oidV2) + r.versionSet = m.Version != nil + if r.versionSet { + err = r.version.ReadFromV2(m.Version) if err != nil { - return fmt.Errorf("invalid passed storage group ID: %w", err) + return fmt.Errorf("invalid protocol version: %w", err) } } - for _, oidV2 = range r.v2.GetFailSG() { - err = oID.ReadFromV2(oidV2) - if err != nil { - return fmt.Errorf("invalid failed storage group ID: %w", err) + if m.PassSg != nil { + r.passSG = make([]oid.ID, len(m.PassSg)) + for i := range m.PassSg { + err = r.passSG[i].ReadFromV2(m.PassSg[i]) + if err != nil { + return fmt.Errorf("invalid passed storage group ID #%d: %w", i, err) + } + } + } else { + r.passSG = nil + } + + if m.FailSg != nil { + r.failSG = make([]oid.ID, len(m.FailSg)) + for i := range m.FailSg { + err = r.failSG[i].ReadFromV2(m.FailSg[i]) + if err != nil { + return fmt.Errorf("invalid failed storage group ID #%d: %w", i, err) + } } + } else { + r.failSG = nil } + r.auditEpoch = m.AuditEpoch + r.auditorPubKey = m.PublicKey + r.completed = m.Complete + r.requestsPoR = m.Requests + r.retriesPoR = m.Retries + r.hits = m.Hit + r.misses = m.Miss + r.fails = m.Fail + r.passNodes = m.PassNodes + r.failNodes = m.FailNodes + r.decoded = true + return nil } @@ -95,16 +169,16 @@ func (r *Result) 
Unmarshal(data []byte) error { // // Zero Result has zero epoch. // -// See also ForEpoch. +// See also [Result.ForEpoch]. func (r Result) Epoch() uint64 { - return r.v2.GetAuditEpoch() + return r.auditEpoch } // ForEpoch specifies NeoFS epoch when the data associated with the Result was audited. // -// See also Epoch. +// See also [Result.Epoch]. func (r *Result) ForEpoch(epoch uint64) { - r.v2.SetAuditEpoch(epoch) + r.auditEpoch = epoch } // Container returns identifier of the container with which the data audit Result @@ -112,28 +186,17 @@ func (r *Result) ForEpoch(epoch uint64) { // // Zero Result does not have container ID. // -// See also ForContainer. +// See also [Result.ForContainer]. func (r Result) Container() (cid.ID, bool) { - var cID cid.ID - - cidV2 := r.v2.GetContainerID() - if cidV2 != nil { - _ = cID.ReadFromV2(*cidV2) - return cID, true - } - - return cID, false + return r.cnr, r.cnrSet } // ForContainer sets identifier of the container with which the data audit Result // is associated. // -// See also Container. +// See also [Result.Container]. func (r *Result) ForContainer(cnr cid.ID) { - var cidV2 refs.ContainerID - cnr.WriteToV2(&cidV2) - - r.v2.SetContainerID(&cidV2) + r.cnr, r.cnrSet = cnr, true } // AuditorKey returns public key of the auditing NeoFS Inner Ring node in @@ -149,7 +212,7 @@ func (r *Result) ForContainer(cnr cid.ID) { // // See also [Result.SetAuditorKey]. func (r Result) AuditorKey() []byte { - return r.v2.GetPublicKey() + return r.auditorPubKey } // SetAuditorKey specifies public key of the auditing NeoFS Inner Ring node in @@ -161,23 +224,23 @@ func (r Result) AuditorKey() []byte { // // See also [Result.AuditorKey]. func (r *Result) SetAuditorKey(key []byte) { - r.v2.SetPublicKey(key) + r.auditorPubKey = key } // Completed returns completion state of the data audit associated with the Result. // // Zero Result corresponds to incomplete data audit. // -// See also Complete. +// See also [Result.SetCompleted]. 
func (r Result) Completed() bool { - return r.v2.GetComplete() + return r.completed } -// Complete marks the data audit associated with the Result as completed. +// SetCompleted sets data audit completion flag. // -// See also Completed. -func (r *Result) Complete() { - r.v2.SetComplete(true) +// See also [Result.SetCompleted]. +func (r *Result) SetCompleted(completed bool) { + r.completed = completed } // RequestsPoR returns number of requests made by Proof-of-Retrievability @@ -185,17 +248,17 @@ func (r *Result) Complete() { // // Zero Result has zero requests. // -// See also SetRequestsPoR. +// See also [Result.SetRequestsPoR]. func (r Result) RequestsPoR() uint32 { - return r.v2.GetRequests() + return r.requestsPoR } // SetRequestsPoR sets number of requests made by Proof-of-Retrievability // audit check to get all headers of the objects inside storage groups. // -// See also RequestsPoR. +// See also [Result.RequestsPoR]. func (r *Result) SetRequestsPoR(v uint32) { - r.v2.SetRequests(v) + r.requestsPoR = v } // RetriesPoR returns number of retries made by Proof-of-Retrievability @@ -203,74 +266,54 @@ func (r *Result) SetRequestsPoR(v uint32) { // // Zero Result has zero retries. // -// See also SetRetriesPoR. +// See also [Result.SetRetriesPoR]. func (r Result) RetriesPoR() uint32 { - return r.v2.GetRetries() + return r.retriesPoR } // SetRetriesPoR sets number of retries made by Proof-of-Retrievability // audit check to get all headers of the objects inside storage groups. // -// See also RetriesPoR. +// See also [Result.RetriesPoR]. func (r *Result) SetRetriesPoR(v uint32) { - r.v2.SetRetries(v) + r.retriesPoR = v } -// IteratePassedStorageGroups iterates over all storage groups that passed -// Proof-of-Retrievability audit check and passes them into f. Breaks on f's -// false return, f MUST NOT be nil. +// PassedStorageGroups returns storage groups that passed +// Proof-of-Retrievability audit check. Breaks on f's false return, f MUST NOT +// be nil. 
// // Zero Result has no passed storage groups and doesn't call f. // -// See also SubmitPassedStorageGroup. -func (r Result) IteratePassedStorageGroups(f func(oid.ID) bool) { - r2 := r.v2.GetPassSG() - - var id oid.ID - - for i := range r2 { - _ = id.ReadFromV2(r2[i]) - - if !f(id) { - return - } - } +// Return value MUST NOT be mutated at least until the end of using the Result. +// +// See also [Result.SetPassedStorageGroups]. +func (r Result) PassedStorageGroups() []oid.ID { + return r.passSG } -// SubmitPassedStorageGroup marks storage group as passed Proof-of-Retrievability +// SetPassedStorageGroups sets storage groups that passed Proof-of-Retrievability // audit check. // -// See also IteratePassedStorageGroups. -func (r *Result) SubmitPassedStorageGroup(sg oid.ID) { - var idV2 refs.ObjectID - sg.WriteToV2(&idV2) - - r.v2.SetPassSG(append(r.v2.GetPassSG(), idV2)) +// Argument MUST NOT be mutated at least until the end of using the Result. +// +// See also [Result.PassedStorageGroups]. +func (r *Result) SetPassedStorageGroups(ids []oid.ID) { + r.passSG = ids } -// IterateFailedStorageGroups is similar to IteratePassedStorageGroups but for failed groups. +// FailedStorageGroups is similar to [Result.PassedStorageGroups] but for failed groups. // -// See also SubmitFailedStorageGroup. -func (r Result) IterateFailedStorageGroups(f func(oid.ID) bool) { - v := r.v2.GetFailSG() - var id oid.ID - - for i := range v { - _ = id.ReadFromV2(v[i]) - if !f(id) { - return - } - } +// See also [Result.SetFailedStorageGroups]. +func (r Result) FailedStorageGroups() []oid.ID { + return r.failSG } -// SubmitFailedStorageGroup is similar to SubmitPassedStorageGroup but for failed groups. +// SetFailedStorageGroups is similar to [Result.PassedStorageGroups] but for failed groups. // -// See also IterateFailedStorageGroups. 
-func (r *Result) SubmitFailedStorageGroup(sg oid.ID) { - var idV2 refs.ObjectID - sg.WriteToV2(&idV2) - - r.v2.SetFailSG(append(r.v2.GetFailSG(), idV2)) +// See also [Result.FailedStorageGroups]. +func (r *Result) SetFailedStorageGroups(ids []oid.ID) { + r.failSG = ids } // Hits returns number of sampled objects under audit placed @@ -279,18 +322,18 @@ func (r *Result) SubmitFailedStorageGroup(sg oid.ID) { // // Zero result has zero hits. // -// See also SetHits. +// See also [Result.SetHits]. func (r Result) Hits() uint32 { - return r.v2.GetHit() + return r.hits } // SetHits sets number of sampled objects under audit placed // in an optimal way according to the containers placement policy // when checking Proof-of-Placement. // -// See also Hits. -func (r *Result) SetHits(hit uint32) { - r.v2.SetHit(hit) +// See also [Result.Hits]. +func (r *Result) SetHits(hits uint32) { + r.hits = hits } // Misses returns number of sampled objects under audit placed @@ -299,18 +342,18 @@ func (r *Result) SetHits(hit uint32) { // // Zero Result has zero misses. // -// See also SetMisses. +// See also [Result.SetMisses]. func (r Result) Misses() uint32 { - return r.v2.GetMiss() + return r.misses } // SetMisses sets number of sampled objects under audit placed // in suboptimal way according to the container's placement policy, // but still at a satisfactory level when checking Proof-of-Placement. // -// See also Misses. -func (r *Result) SetMisses(miss uint32) { - r.v2.SetMiss(miss) +// See also [Result.Misses]. +func (r *Result) SetMisses(misses uint32) { + r.misses = misses } // Failures returns number of sampled objects under audit stored @@ -319,66 +362,55 @@ func (r *Result) SetMisses(miss uint32) { // // Zero result has zero failures. // -// See also SetFailures. +// See also [Result.SetFailures]. 
func (r Result) Failures() uint32 {
-	return r.v2.GetFail()
+	return r.fails
 }
 
 // SetFailures sets number of sampled objects under audit stored
 // in a way not confirming placement policy or not found at all
 // when checking Proof-of-Placement.
 //
-// See also Failures.
-func (r *Result) SetFailures(fail uint32) {
-	r.v2.SetFail(fail)
+// See also [Result.Failures].
+func (r *Result) SetFailures(fails uint32) {
+	r.fails = fails
 }
 
-// IteratePassedStorageNodes iterates over all storage nodes that passed at least one
-// Proof-of-Data-Possession audit check and passes their public keys into f. Breaks on
-// f's false return.
+// PassedStorageNodes returns public keys of storage nodes that passed at least
+// one Proof-of-Data-Possession audit check.
 //
 // f MUST NOT be nil and MUST NOT mutate parameter passed into it at least until
 // the end of using the Result.
 //
 // Zero Result has no passed storage nodes and doesn't call f.
 //
-// See also SubmitPassedStorageNode.
-func (r Result) IteratePassedStorageNodes(f func([]byte) bool) {
-	v := r.v2.GetPassNodes()
-
-	for i := range v {
-		if !f(v[i]) {
-			return
-		}
-	}
+// See also [Result.SetPassedStorageNodes].
+func (r Result) PassedStorageNodes() [][]byte {
+	return r.passNodes
 }
 
-// SubmitPassedStorageNodes marks storage node list as passed Proof-of-Data-Possession
-// audit check. The list contains public keys.
+// SetPassedStorageNodes sets public keys of storage nodes that passed at least
+// one Proof-of-Data-Possession audit check.
 //
 // Argument and its elements MUST NOT be mutated at least until the end of using the Result.
 //
-// See also IteratePassedStorageNodes.
-func (r *Result) SubmitPassedStorageNodes(list [][]byte) {
-	r.v2.SetPassNodes(list)
+// See also [Result.PassedStorageNodes].
+func (r *Result) SetPassedStorageNodes(list [][]byte) {
+	r.passNodes = list
 }
 
-// IterateFailedStorageNodes is similar to IteratePassedStorageNodes but for failed nodes.
+// FailedStorageNodes is similar to [Result.PassedStorageNodes] but for
+// failures.
 //
-// See also SubmitPassedStorageNodes.
-func (r Result) IterateFailedStorageNodes(f func([]byte) bool) {
-	v := r.v2.GetFailNodes()
-
-	for i := range v {
-		if !f(v[i]) {
-			return
-		}
-	}
+// See also [Result.SetFailedStorageNodes].
+func (r Result) FailedStorageNodes() [][]byte {
+	return r.failNodes
 }
 
-// SubmitFailedStorageNodes is similar to SubmitPassedStorageNodes but for failed nodes.
+// SetFailedStorageNodes is similar to [Result.SetPassedStorageNodes] but for
+// failures.
 //
-// See also IterateFailedStorageNodes.
-func (r *Result) SubmitFailedStorageNodes(list [][]byte) {
-	r.v2.SetFailNodes(list)
+// See also [Result.FailedStorageNodes].
+func (r *Result) SetFailedStorageNodes(list [][]byte) {
+	r.failNodes = list
 }
diff --git a/audit/result_test.go b/audit/result_test.go
index 558cd4fdd..a64651232 100644
--- a/audit/result_test.go
+++ b/audit/result_test.go
@@ -4,188 +4,526 @@ import (
 	"bytes"
 	"testing"
 
+	apiaudit "github.com/nspcc-dev/neofs-sdk-go/api/audit"
+	"github.com/nspcc-dev/neofs-sdk-go/api/refs"
 	"github.com/nspcc-dev/neofs-sdk-go/audit"
 	audittest "github.com/nspcc-dev/neofs-sdk-go/audit/test"
 	cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test"
 	oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
 	oidtest "github.com/nspcc-dev/neofs-sdk-go/object/id/test"
 	"github.com/stretchr/testify/require"
+	"google.golang.org/protobuf/proto"
 )
 
-func TestResultData(t *testing.T) {
+func TestResult_Version(t *testing.T) {
 	var r audit.Result
+	r.ForContainer(cidtest.ID()) // just to satisfy Unmarshal
+	var msg1, msg2 apiaudit.DataAuditResult
+
+	b := r.Marshal()
+	err := proto.Unmarshal(b, &msg1)
+	require.NoError(t, err)
+	require.Equal(t, &refs.Version{Major: 2, Minor: 13}, msg1.Version)
+
+	msg2.Version = &refs.Version{Major: 3, Minor: 14}
+	msg2.ContainerId = &refs.ContainerID{Value: make([]byte, 32)} // just to satisfy Unmarshal
+	b, err = 
proto.Marshal(&msg2) + require.NoError(t, err) + err = r.Unmarshal(b) + require.NoError(t, err) + err = proto.Unmarshal(r.Marshal(), &msg1) + require.Equal(t, &refs.Version{Major: 3, Minor: 14}, msg1.Version) +} - countSG := func(passed bool, f func(oid.ID)) int { - called := 0 +func TestResult_Marshal(t *testing.T) { + r := audittest.Result() - ff := func(arg oid.ID) bool { - called++ + data := r.Marshal() - if f != nil { - f(arg) - } + var r2 audit.Result + require.NoError(t, r2.Unmarshal(data)) - return true - } + require.Equal(t, r, r2) - if passed { - r.IteratePassedStorageGroups(ff) - } else { - r.IterateFailedStorageGroups(ff) - } + t.Run("invalid protobuf", func(t *testing.T) { + var r audit.Result + msg := []byte("definitely_not_protobuf") + err := r.Unmarshal(msg) + require.ErrorContains(t, err, "decode protobuf") + }) + t.Run("missing container", func(t *testing.T) { + var msg apiaudit.DataAuditResult + require.NoError(t, proto.Unmarshal(r.Marshal(), &msg)) + msg.ContainerId = nil + b, err := proto.Marshal(&msg) + require.NoError(t, err) + + err = r.Unmarshal(b) + require.ErrorContains(t, err, "container ID is not set") + }) + t.Run("invalid container", func(t *testing.T) { + var msg apiaudit.DataAuditResult + require.NoError(t, proto.Unmarshal(r.Marshal(), &msg)) + msg.ContainerId = &refs.ContainerID{Value: []byte("invalid_container")} + b, err := proto.Marshal(&msg) + require.NoError(t, err) + + err = r.Unmarshal(b) + require.ErrorContains(t, err, "invalid container") + }) + t.Run("invalid passed SG", func(t *testing.T) { + r := r + r.SetPassedStorageGroups([]oid.ID{oidtest.ID(), oidtest.ID()}) + var msg apiaudit.DataAuditResult + require.NoError(t, proto.Unmarshal(r.Marshal(), &msg)) + msg.PassSg[1].Value = []byte("invalid_object") + b, err := proto.Marshal(&msg) + require.NoError(t, err) + + err = r.Unmarshal(b) + require.ErrorContains(t, err, "invalid passed storage group ID #1") + }) + t.Run("invalid failed SG", func(t *testing.T) { + r := r + 
r.SetFailedStorageGroups([]oid.ID{oidtest.ID(), oidtest.ID()}) + var msg apiaudit.DataAuditResult + require.NoError(t, proto.Unmarshal(r.Marshal(), &msg)) + msg.FailSg[1].Value = []byte("invalid_object") + b, err := proto.Marshal(&msg) + require.NoError(t, err) + + err = r.Unmarshal(b) + require.ErrorContains(t, err, "invalid failed storage group ID #1") + }) +} - return called - } +func TestResult_Epoch(t *testing.T) { + var r audit.Result + r.ForContainer(cidtest.ID()) // just to satisfy Unmarshal + var msg apiaudit.DataAuditResult - countPassSG := func(f func(oid.ID)) int { return countSG(true, f) } - countFailSG := func(f func(oid.ID)) int { return countSG(false, f) } + require.Zero(t, r.Epoch()) - countNodes := func(passed bool, f func([]byte)) int { - called := 0 + r.ForEpoch(42) + require.EqualValues(t, 42, r.Epoch()) - ff := func(arg []byte) bool { - called++ + b := r.Marshal() + err := proto.Unmarshal(b, &msg) + require.NoError(t, err) - if f != nil { - f(arg) - } + b, err = proto.Marshal(&msg) + require.NoError(t, err) - return true - } + r.ForEpoch(43) // any other + require.EqualValues(t, 43, r.Epoch()) - if passed { - r.IteratePassedStorageNodes(ff) - } else { - r.IterateFailedStorageNodes(ff) - } + err = r.Unmarshal(b) + require.NoError(t, err) + require.EqualValues(t, 42, r.Epoch()) +} - return called - } +func TestResult_Container(t *testing.T) { + var r audit.Result + var msg apiaudit.DataAuditResult + cnr := cidtest.ID() - countPassNodes := func(f func([]byte)) int { return countNodes(true, f) } - countFailNodes := func(f func([]byte)) int { return countNodes(false, f) } + _, ok := r.Container() + require.False(t, ok) - require.Zero(t, r.Epoch()) - _, set := r.Container() - require.False(t, set) + r.ForContainer(cnr) + res, ok := r.Container() + require.True(t, ok) + require.Equal(t, cnr, res) + + b := r.Marshal() + err := proto.Unmarshal(b, &msg) + require.NoError(t, err) + + b, err = proto.Marshal(&msg) + require.NoError(t, err) + + cnrOther := 
cidtest.ChangeID(cnr)
+	r.ForContainer(cnrOther)
+	res, ok = r.Container()
+	require.Equal(t, cnrOther, res)
+
+	err = r.Unmarshal(b)
+	require.NoError(t, err)
+	res, ok = r.Container()
+	require.True(t, ok)
+	require.Equal(t, cnr, res)
+}
+
+func TestResult_AuditorKey(t *testing.T) {
+	var r audit.Result
+	r.ForContainer(cidtest.ID()) // just to satisfy Unmarshal
+	var msg apiaudit.DataAuditResult
+	key := []byte("any_key")
+
+	require.Zero(t, r.AuditorKey())
+
+	b := r.Marshal()
+	err := r.Unmarshal(b)
+	require.NoError(t, err)
 	require.Nil(t, r.AuditorKey())
+
+	r.SetAuditorKey(key)
+	require.Equal(t, key, r.AuditorKey())
+
+	b = r.Marshal()
+	err = proto.Unmarshal(b, &msg)
+	require.NoError(t, err)
+
+	b, err = proto.Marshal(&msg)
+	require.NoError(t, err)
+
+	keyOther := bytes.Clone(key)
+	keyOther[0]++
+	r.SetAuditorKey(keyOther)
+	require.Equal(t, keyOther, r.AuditorKey())
+
+	err = r.Unmarshal(b)
+	require.NoError(t, err)
+	require.Equal(t, key, r.AuditorKey())
+}
+
+func TestResult_Completed(t *testing.T) {
+	var r audit.Result
+	r.ForContainer(cidtest.ID()) // just to satisfy Unmarshal
+	var msg apiaudit.DataAuditResult
+
+	require.Zero(t, r.Completed())
+
+	b := r.Marshal()
+	err := r.Unmarshal(b)
+	require.NoError(t, err)
 	require.False(t, r.Completed())
+
+	r.SetCompleted(true)
+	require.True(t, r.Completed())
+
+	b = r.Marshal()
+	err = proto.Unmarshal(b, &msg)
+	require.NoError(t, err)
+
+	b, err = proto.Marshal(&msg)
+	require.NoError(t, err)
+
+	r.SetCompleted(false)
+	require.False(t, r.Completed())
+
+	err = r.Unmarshal(b)
+	require.NoError(t, err)
+	require.True(t, r.Completed())
+}
+
+func TestResult_RequestsPoR(t *testing.T) {
+	var r audit.Result
+	r.ForContainer(cidtest.ID()) // just to satisfy Unmarshal
+	var msg apiaudit.DataAuditResult
+	const val = 64304
+	require.Zero(t, r.RequestsPoR())
+
+	b := r.Marshal()
+	err := r.Unmarshal(b)
+	require.NoError(t, err)
+	require.Zero(t, r.RequestsPoR())
+
+	r.SetRequestsPoR(val)
+	require.EqualValues(t, val, 
r.RequestsPoR()) + + b = r.Marshal() + err = proto.Unmarshal(b, &msg) + require.NoError(t, err) + + b, err = proto.Marshal(&msg) + require.NoError(t, err) + + valOther := val + 1 + r.SetRequestsPoR(uint32(valOther)) + require.EqualValues(t, valOther, r.RequestsPoR()) + + err = r.Unmarshal(b) + require.NoError(t, err) + require.EqualValues(t, val, r.RequestsPoR()) +} + +func TestResult_RetriesPoR(t *testing.T) { + var r audit.Result + r.ForContainer(cidtest.ID()) // just to satisfy Unmarshal + var msg apiaudit.DataAuditResult + const val = 984609 + require.Zero(t, r.RetriesPoR()) - require.Zero(t, countPassSG(nil)) - require.Zero(t, countFailSG(nil)) - require.Zero(t, countPassNodes(nil)) - require.Zero(t, countFailNodes(nil)) - epoch := uint64(13) - r.ForEpoch(epoch) - require.Equal(t, epoch, r.Epoch()) + b := r.Marshal() + err := r.Unmarshal(b) + require.NoError(t, err) + require.Zero(t, r.RetriesPoR()) - cnr := cidtest.ID() - r.ForContainer(cnr) - cID, set := r.Container() - require.True(t, set) - require.Equal(t, cnr, cID) + r.SetRetriesPoR(val) + require.EqualValues(t, val, r.RetriesPoR()) - key := []byte{1, 2, 3} - r.SetAuditorKey(key) - require.Equal(t, key, r.AuditorKey()) + b = r.Marshal() + err = proto.Unmarshal(b, &msg) + require.NoError(t, err) - r.Complete() - require.True(t, r.Completed()) + b, err = proto.Marshal(&msg) + require.NoError(t, err) + + valOther := val + 1 + r.SetRetriesPoR(uint32(valOther)) + require.EqualValues(t, valOther, r.RetriesPoR()) - requests := uint32(2) - r.SetRequestsPoR(requests) - require.Equal(t, requests, r.RequestsPoR()) - - retries := uint32(1) - r.SetRetriesPoR(retries) - require.Equal(t, retries, r.RetriesPoR()) - - passSG1, passSG2 := oidtest.ID(), oidtest.ID() - r.SubmitPassedStorageGroup(passSG1) - r.SubmitPassedStorageGroup(passSG2) - - called1, called2 := false, false - - require.EqualValues(t, 2, countPassSG(func(id oid.ID) { - if id.Equals(passSG1) { - called1 = true - } else if id.Equals(passSG2) { - called2 = 
true - } - })) - require.True(t, called1) - require.True(t, called2) - - failSG1, failSG2 := oidtest.ID(), oidtest.ID() - r.SubmitFailedStorageGroup(failSG1) - r.SubmitFailedStorageGroup(failSG2) - - called1, called2 = false, false - - require.EqualValues(t, 2, countFailSG(func(id oid.ID) { - if id.Equals(failSG1) { - called1 = true - } else if id.Equals(failSG2) { - called2 = true - } - })) - require.True(t, called1) - require.True(t, called2) - - hit := uint32(1) - r.SetHits(hit) - require.Equal(t, hit, r.Hits()) - - miss := uint32(2) - r.SetMisses(miss) - require.Equal(t, miss, r.Misses()) - - fail := uint32(3) - r.SetFailures(fail) - require.Equal(t, fail, r.Failures()) - - passNodes := [][]byte{{1}, {2}} - r.SubmitPassedStorageNodes(passNodes) - - called1, called2 = false, false - - require.EqualValues(t, 2, countPassNodes(func(arg []byte) { - if bytes.Equal(arg, passNodes[0]) { - called1 = true - } else if bytes.Equal(arg, passNodes[1]) { - called2 = true - } - })) - require.True(t, called1) - require.True(t, called2) - - failNodes := [][]byte{{3}, {4}} - r.SubmitFailedStorageNodes(failNodes) - - called1, called2 = false, false - - require.EqualValues(t, 2, countFailNodes(func(arg []byte) { - if bytes.Equal(arg, failNodes[0]) { - called1 = true - } else if bytes.Equal(arg, failNodes[1]) { - called2 = true - } - })) - require.True(t, called1) - require.True(t, called2) + err = r.Unmarshal(b) + require.NoError(t, err) + require.EqualValues(t, val, r.RetriesPoR()) } -func TestResultEncoding(t *testing.T) { - r := audittest.Result() +func TestResult_Hits(t *testing.T) { + var r audit.Result + r.ForContainer(cidtest.ID()) // just to satisfy Unmarshal + var msg apiaudit.DataAuditResult + const val = 23641 - t.Run("binary", func(t *testing.T) { - data := r.Marshal() + require.Zero(t, r.Hits()) - var r2 audit.Result - require.NoError(t, r2.Unmarshal(data)) + b := r.Marshal() + err := r.Unmarshal(b) + require.NoError(t, err) + require.Zero(t, r.Hits()) - 
require.Equal(t, r, r2) - }) + r.SetHits(val) + require.EqualValues(t, val, r.Hits()) + + b = r.Marshal() + err = proto.Unmarshal(b, &msg) + require.NoError(t, err) + + b, err = proto.Marshal(&msg) + require.NoError(t, err) + + valOther := val + 1 + r.SetHits(uint32(valOther)) + require.EqualValues(t, valOther, r.Hits()) + + err = r.Unmarshal(b) + require.NoError(t, err) + require.EqualValues(t, val, r.Hits()) +} + +func TestResult_Misses(t *testing.T) { + var r audit.Result + r.ForContainer(cidtest.ID()) // just to satisfy Unmarshal + var msg apiaudit.DataAuditResult + const val = 684975 + + require.Zero(t, r.Misses()) + + b := r.Marshal() + err := r.Unmarshal(b) + require.NoError(t, err) + require.Zero(t, r.Misses()) + + r.SetMisses(val) + require.EqualValues(t, val, r.Misses()) + + b = r.Marshal() + err = proto.Unmarshal(b, &msg) + require.NoError(t, err) + + b, err = proto.Marshal(&msg) + require.NoError(t, err) + + valOther := val + 1 + r.SetMisses(uint32(valOther)) + require.EqualValues(t, valOther, r.Misses()) + + err = r.Unmarshal(b) + require.NoError(t, err) + require.EqualValues(t, val, r.Misses()) +} + +func TestResult_Failures(t *testing.T) { + var r audit.Result + r.ForContainer(cidtest.ID()) // just to satisfy Unmarshal + var msg apiaudit.DataAuditResult + const val = 25927509 + + require.Zero(t, r.Failures()) + + b := r.Marshal() + err := r.Unmarshal(b) + require.NoError(t, err) + require.Zero(t, r.Failures()) + + r.SetFailures(val) + require.EqualValues(t, val, r.Failures()) + + b = r.Marshal() + err = proto.Unmarshal(b, &msg) + require.NoError(t, err) + + b, err = proto.Marshal(&msg) + require.NoError(t, err) + valOther := val + 1 + r.SetFailures(uint32(valOther)) + require.EqualValues(t, valOther, r.Failures()) + + err = r.Unmarshal(b) + require.NoError(t, err) + require.EqualValues(t, val, r.Failures()) +} + +func TestResult_PassedStorageGroups(t *testing.T) { + var r audit.Result + r.ForContainer(cidtest.ID()) // just to satisfy Unmarshal + var 
msg apiaudit.DataAuditResult + ids := []oid.ID{oidtest.ID(), oidtest.ID()} + + require.Zero(t, r.PassedStorageGroups()) + + b := r.Marshal() + err := r.Unmarshal(b) + require.NoError(t, err) + require.Zero(t, r.PassedStorageNodes()) + + r.SetPassedStorageGroups(ids) + require.Equal(t, ids, r.PassedStorageGroups()) + + b = r.Marshal() + err = proto.Unmarshal(b, &msg) + require.NoError(t, err) + + b, err = proto.Marshal(&msg) + require.NoError(t, err) + + idsOther := make([]oid.ID, len(ids)) + for i := range idsOther { + idsOther[i] = oidtest.ChangeID(ids[i]) + } + + r.SetPassedStorageGroups(idsOther) + require.Equal(t, idsOther, r.PassedStorageGroups()) + + err = r.Unmarshal(b) + require.NoError(t, err) + require.Equal(t, ids, r.PassedStorageGroups()) +} + +func TestResult_FailedStorageGroups(t *testing.T) { + var r audit.Result + r.ForContainer(cidtest.ID()) // just to satisfy Unmarshal + var msg apiaudit.DataAuditResult + ids := []oid.ID{oidtest.ID(), oidtest.ID()} + + require.Zero(t, r.FailedStorageGroups()) + + b := r.Marshal() + err := r.Unmarshal(b) + require.NoError(t, err) + require.Zero(t, r.FailedStorageGroups()) + + r.SetFailedStorageGroups(ids) + require.Equal(t, ids, r.FailedStorageGroups()) + + b = r.Marshal() + err = proto.Unmarshal(b, &msg) + require.NoError(t, err) + + b, err = proto.Marshal(&msg) + require.NoError(t, err) + + idsOther := make([]oid.ID, len(ids)) + for i := range idsOther { + idsOther[i] = oidtest.ChangeID(ids[i]) + } + + r.SetFailedStorageGroups(idsOther) + require.Equal(t, idsOther, r.FailedStorageGroups()) + + err = r.Unmarshal(b) + require.NoError(t, err) + require.Equal(t, ids, r.FailedStorageGroups()) +} + +func TestResult_PassedStorageNodes(t *testing.T) { + var r audit.Result + r.ForContainer(cidtest.ID()) // just to satisfy Unmarshal + var msg apiaudit.DataAuditResult + keys := [][]byte{ + []byte("any_key1"), + []byte("any_key2"), + } + + require.Zero(t, r.PassedStorageNodes()) + + b := r.Marshal() + err := r.Unmarshal(b) + 
require.NoError(t, err) + require.Zero(t, r.PassedStorageNodes()) + + r.SetPassedStorageNodes(keys) + require.Equal(t, keys, r.PassedStorageNodes()) + + b = r.Marshal() + err = proto.Unmarshal(b, &msg) + require.NoError(t, err) + + b, err = proto.Marshal(&msg) + require.NoError(t, err) + + keysOther := make([][]byte, len(keys)) + for i := range keysOther { + keysOther[i] = bytes.Clone(keys[i]) + keysOther[i][0]++ + } + + r.SetPassedStorageNodes(keysOther) + require.Equal(t, keysOther, r.PassedStorageNodes()) + + err = r.Unmarshal(b) + require.NoError(t, err) + require.Equal(t, keys, r.PassedStorageNodes()) +} + +func TestResult_FailedStorageNodes(t *testing.T) { + var r audit.Result + r.ForContainer(cidtest.ID()) // just to satisfy Unmarshal + var msg apiaudit.DataAuditResult + keys := [][]byte{ + []byte("any_key1"), + []byte("any_key2"), + } + + require.Zero(t, r.FailedStorageNodes()) + + b := r.Marshal() + err := r.Unmarshal(b) + require.NoError(t, err) + require.Zero(t, r.FailedStorageNodes()) + + r.SetFailedStorageNodes(keys) + require.Equal(t, keys, r.FailedStorageNodes()) + + b = r.Marshal() + err = proto.Unmarshal(b, &msg) + require.NoError(t, err) + + b, err = proto.Marshal(&msg) + require.NoError(t, err) + + keysOther := make([][]byte, len(keys)) + for i := range keysOther { + keysOther[i] = bytes.Clone(keys[i]) + keysOther[i][0]++ + } + + r.SetFailedStorageNodes(keysOther) + require.Equal(t, keysOther, r.FailedStorageNodes()) + + err = r.Unmarshal(b) + require.NoError(t, err) + require.Equal(t, keys, r.FailedStorageNodes()) } diff --git a/audit/test/generate.go b/audit/test/generate.go index 9ce9c9184..c3d55658d 100644 --- a/audit/test/generate.go +++ b/audit/test/generate.go @@ -1,6 +1,10 @@ package audittest import ( + "fmt" + "math/rand" + "strconv" + "github.com/nspcc-dev/neofs-sdk-go/audit" cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" oidtest "github.com/nspcc-dev/neofs-sdk-go/object/id/test" @@ -11,26 +15,32 @@ func Result() 
audit.Result {
 	var x audit.Result
 
 	x.ForContainer(cidtest.ID())
-	x.SetAuditorKey([]byte("key"))
-	x.Complete()
-	x.ForEpoch(44)
-	x.SetHits(55)
-	x.SetMisses(66)
-	x.SetFailures(77)
-	x.SetRequestsPoR(88)
-	x.SetRequestsPoR(99)
-	x.SubmitFailedStorageNodes([][]byte{
-		[]byte("node1"),
-		[]byte("node2"),
-	})
-	x.SubmitPassedStorageNodes([][]byte{
-		[]byte("node3"),
-		[]byte("node4"),
-	})
-	x.SubmitPassedStorageGroup(oidtest.ID())
-	x.SubmitPassedStorageGroup(oidtest.ID())
-	x.SubmitFailedStorageGroup(oidtest.ID())
-	x.SubmitFailedStorageGroup(oidtest.ID())
+	auditorKey := make([]byte, 33)
+	rand.Read(auditorKey)
+	x.SetAuditorKey(auditorKey)
+	x.SetCompleted(rand.Int()%2 == 0)
+	x.ForEpoch(rand.Uint64())
+	x.SetHits(rand.Uint32())
+	x.SetMisses(rand.Uint32())
+	x.SetFailures(rand.Uint32())
+	x.SetRequestsPoR(rand.Uint32())
+	x.SetRetriesPoR(rand.Uint32())
+	failedNodes := make([][]byte, rand.Int()%4)
+	for i := range failedNodes {
+		failedNodes[i] = []byte("failed_node_" + strconv.Itoa(i+1))
+	}
+	x.SetFailedStorageNodes(failedNodes)
+	passedNodes := make([][]byte, rand.Int()%4)
+	for i := range passedNodes {
+		passedNodes[i] = []byte("passed_node_" + strconv.Itoa(i+1))
+	}
+	x.SetPassedStorageNodes(passedNodes)
+	x.SetPassedStorageGroups(oidtest.NIDs(rand.Int() % 4))
+	x.SetFailedStorageGroups(oidtest.NIDs(rand.Int() % 4))
+
+	if err := x.Unmarshal(x.Marshal()); err != nil { // to set all defaults
+		panic(fmt.Errorf("unexpected encode-decode failure: %w", err))
+	}
 
 	return x
 }
diff --git a/audit/test/generate_test.go b/audit/test/generate_test.go
new file mode 100644
index 000000000..9d55f9b59
--- /dev/null
+++ b/audit/test/generate_test.go
@@ -0,0 +1,18 @@
+package audittest_test
+
+import (
+	"testing"
+
+	"github.com/nspcc-dev/neofs-sdk-go/audit"
+	audittest "github.com/nspcc-dev/neofs-sdk-go/audit/test"
+	"github.com/stretchr/testify/require"
+)
+
+func TestResult(t *testing.T) {
+	r := audittest.Result()
+	require.NotEqual(t, r, audittest.Result())
+
+	var 
r2 audit.Result + require.NoError(t, r2.Unmarshal(r.Marshal())) + require.EqualValues(t, r, r2) +} diff --git a/bearer/bearer.go b/bearer/bearer.go index 8ae93fb61..35a1c3df4 100644 --- a/bearer/bearer.go +++ b/bearer/bearer.go @@ -1,22 +1,25 @@ package bearer import ( + "crypto/ecdsa" "errors" "fmt" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neofs-api-go/v2/acl" - "github.com/nspcc-dev/neofs-api-go/v2/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/acl" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" cid "github.com/nspcc-dev/neofs-sdk-go/container/id" neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" "github.com/nspcc-dev/neofs-sdk-go/eacl" "github.com/nspcc-dev/neofs-sdk-go/user" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" ) // Token represents bearer token for object service operations. // -// Token is mutually compatible with github.com/nspcc-dev/neofs-api-go/v2/acl.BearerToken -// message. See ReadFromV2 / WriteToV2 methods. +// Token is mutually compatible with [acl.BearerToken] message. See +// [Token.ReadFromV2] / [Token.WriteToV2] methods. // // Instances can be created using built-in var declaration. type Token struct { @@ -33,123 +36,122 @@ type Token struct { iat, nbf, exp uint64 sigSet bool - sig refs.Signature + sig neofscrypto.Signature } -// reads Token from the acl.BearerToken message. If checkFieldPresence is set, -// returns an error on absence of any protocol-required field. 
-func (b *Token) readFromV2(m acl.BearerToken, checkFieldPresence bool) error { +func (b *Token) readFromV2(m *acl.BearerToken, checkFieldPresence bool) error { var err error - body := m.GetBody() - if checkFieldPresence && body == nil { + if checkFieldPresence && m.Body == nil { return errors.New("missing token body") } - eaclTable := body.GetEACL() - if b.eaclTableSet = eaclTable != nil; b.eaclTableSet { - b.eaclTable = *eacl.NewTableFromV2(eaclTable) + bodySet := m.Body != nil + if b.eaclTableSet = bodySet && m.Body.EaclTable != nil; b.eaclTableSet { + err = b.eaclTable.ReadFromV2(m.Body.EaclTable) + if err != nil { + return fmt.Errorf("invalid eACL table: %w", err) + } } else if checkFieldPresence { return errors.New("missing eACL table") } - targetUser := body.GetOwnerID() - if b.targetUserSet = targetUser != nil; b.targetUserSet { - err = b.targetUser.ReadFromV2(*targetUser) + if b.targetUserSet = bodySet && m.Body.OwnerId != nil; b.targetUserSet { + err = b.targetUser.ReadFromV2(m.Body.OwnerId) if err != nil { return fmt.Errorf("invalid target user: %w", err) } } - issuer := body.GetIssuer() - if b.issuerSet = issuer != nil; b.issuerSet { - err = b.issuer.ReadFromV2(*issuer) + if b.issuerSet = bodySet && m.Body.Issuer != nil; b.issuerSet { + err = b.issuer.ReadFromV2(m.Body.Issuer) if err != nil { return fmt.Errorf("invalid issuer: %w", err) } } - lifetime := body.GetLifetime() - if b.lifetimeSet = lifetime != nil; b.lifetimeSet { - b.iat = lifetime.GetIat() - b.nbf = lifetime.GetNbf() - b.exp = lifetime.GetExp() + if b.lifetimeSet = bodySet && m.Body.Lifetime != nil; b.lifetimeSet { + b.iat = m.Body.Lifetime.Iat + b.nbf = m.Body.Lifetime.Nbf + b.exp = m.Body.Lifetime.Exp } else if checkFieldPresence { return errors.New("missing token lifetime") } - sig := m.GetSignature() - if b.sigSet = sig != nil; sig != nil { - b.sig = *sig - } else if checkFieldPresence { - return errors.New("missing body signature") + if b.sigSet = m.Signature != nil; b.sigSet { + err 
= b.sig.ReadFromV2(m.Signature) + if err != nil { + return fmt.Errorf("invalid body signature: %w", err) + } } return nil } -// ReadFromV2 reads Token from the acl.BearerToken message. +// ReadFromV2 reads Token from the [acl.BearerToken] message. Returns an error +// if the message is malformed according to the NeoFS API V2 protocol. The +// message must not be nil. +// +// ReadFromV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. // -// See also WriteToV2. -func (b *Token) ReadFromV2(m acl.BearerToken) error { +// See also [Token.WriteToV2]. +func (b *Token) ReadFromV2(m *acl.BearerToken) error { return b.readFromV2(m, true) } -func (b Token) fillBody() *acl.BearerTokenBody { +func (b Token) fillBody() *acl.BearerToken_Body { if !b.eaclTableSet && !b.targetUserSet && !b.lifetimeSet && !b.issuerSet { return nil } - var body acl.BearerTokenBody + var body acl.BearerToken_Body if b.eaclTableSet { - body.SetEACL(b.eaclTable.ToV2()) + body.EaclTable = new(acl.EACLTable) + b.eaclTable.WriteToV2(body.EaclTable) } if b.targetUserSet { - var targetUser refs.OwnerID - b.targetUser.WriteToV2(&targetUser) - - body.SetOwnerID(&targetUser) + body.OwnerId = new(refs.OwnerID) + b.targetUser.WriteToV2(body.OwnerId) } if b.issuerSet { - var issuer refs.OwnerID - b.issuer.WriteToV2(&issuer) - - body.SetIssuer(&issuer) + body.Issuer = new(refs.OwnerID) + b.issuer.WriteToV2(body.Issuer) } if b.lifetimeSet { - var lifetime acl.TokenLifetime - lifetime.SetIat(b.iat) - lifetime.SetNbf(b.nbf) - lifetime.SetExp(b.exp) - - body.SetLifetime(&lifetime) + body.Lifetime = new(acl.BearerToken_Body_TokenLifetime) + body.Lifetime.Iat = b.iat + body.Lifetime.Nbf = b.nbf + body.Lifetime.Exp = b.exp } return &body } func (b Token) signedData() []byte { - return b.fillBody().StableMarshal(nil) + m := b.fillBody() + bs := make([]byte, m.MarshaledSize()) + m.MarshalStable(bs) + return bs } -// WriteToV2 writes 
Token to the acl.BearerToken message. -// The message must not be nil. +// WriteToV2 writes Table to the [acl.BearerToken] message of the NeoFS API +// protocol. +// +// WriteToV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. // -// See also ReadFromV2. +// See also [Token.ReadFromV2]. func (b Token) WriteToV2(m *acl.BearerToken) { - m.SetBody(b.fillBody()) - - var sig *refs.Signature - + m.Body = b.fillBody() if b.sigSet { - sig = &b.sig + m.Signature = new(refs.Signature) + b.sig.WriteToV2(m.Signature) } - - m.SetSignature(sig) } // SetExp sets "exp" (expiration time) claim which identifies the @@ -160,7 +162,7 @@ func (b Token) WriteToV2(m *acl.BearerToken) { // // Naming is inspired by https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.4. // -// See also InvalidAt. +// See also [Token.InvalidAt]. func (b *Token) SetExp(exp uint64) { b.exp = exp b.lifetimeSet = true @@ -173,7 +175,7 @@ func (b *Token) SetExp(exp uint64) { // // Naming is inspired by https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.5. // -// See also InvalidAt. +// See also [Token.InvalidAt]. func (b *Token) SetNbf(nbf uint64) { b.nbf = nbf b.lifetimeSet = true @@ -185,7 +187,7 @@ func (b *Token) SetNbf(nbf uint64) { // // Naming is inspired by https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.6. // -// See also InvalidAt. +// See also [Token.InvalidAt]. func (b *Token) SetIat(iat uint64) { b.iat = iat b.lifetimeSet = true @@ -195,7 +197,7 @@ func (b *Token) SetIat(iat uint64) { // // Zero Container is invalid in any epoch. // -// See also SetExp, SetNbf, SetIat. +// See also [Token.SetExp], [Token.SetNbf], [Token.SetIat]. 
func (b Token) InvalidAt(epoch uint64) bool { return !b.lifetimeSet || b.nbf > epoch || b.iat > epoch || b.exp < epoch } @@ -208,21 +210,18 @@ func (b Token) InvalidAt(epoch uint64) bool { // SetEACLTable MUST be called if Token is going to be transmitted over // NeoFS API V2 protocol. // -// See also EACLTable, AssertContainer. +// See also [Token.EACLTable], [Token.AssertContainer]. func (b *Token) SetEACLTable(table eacl.Table) { b.eaclTable = table b.eaclTableSet = true } -// EACLTable returns extended ACL table set by SetEACLTable. +// EACLTable returns extended ACL table set by SetEACLTable. Second value +// indicates whether the eACL is set. // // Zero Token has zero eacl.Table. -func (b Token) EACLTable() eacl.Table { - if b.eaclTableSet { - return b.eaclTable - } - - return eacl.Table{} +func (b Token) EACLTable() (eacl.Table, bool) { + return b.eaclTable, b.eaclTableSet } // AssertContainer checks if the token is valid within the given container. @@ -232,14 +231,14 @@ func (b Token) EACLTable() eacl.Table { // // Zero Token is valid in any container. // -// See also SetEACLTable. +// See also [Token.SetEACLTable]. func (b Token) AssertContainer(cnr cid.ID) bool { if !b.eaclTableSet { return true } - cnrTable, set := b.eaclTable.CID() - return !set || cnrTable.Equals(cnr) + cnrTable := b.eaclTable.LimitedContainer() + return cnrTable.IsZero() || cnrTable == cnr } // ForUser specifies ID of the user who can use the Token for the operations @@ -247,7 +246,7 @@ func (b Token) AssertContainer(cnr cid.ID) bool { // // Optional: by default, any user has access to Token usage. // -// See also AssertUser. +// See also [Token.AssertUser]. func (b *Token) ForUser(id user.ID) { b.targetUser = id b.targetUserSet = true @@ -257,9 +256,9 @@ func (b *Token) ForUser(id user.ID) { // // Zero Token is available to any user. // -// See also ForUser. +// See also [Token.ForUser]. 
func (b Token) AssertUser(id user.ID) bool { - return !b.targetUserSet || b.targetUser.Equals(id) + return !b.targetUserSet || b.targetUser == id } // Sign calculates and writes signature of the [Token] data along with issuer ID @@ -275,20 +274,16 @@ func (b Token) AssertUser(id user.ID) bool { func (b *Token) Sign(signer user.Signer) error { b.SetIssuer(signer.UserID()) - var sig neofscrypto.Signature - - err := sig.Calculate(signer, b.signedData()) + err := b.sig.Calculate(signer, b.signedData()) if err != nil { return err } - - sig.WriteToV2(&b.sig) b.sigSet = true return nil } -// SignedData returns actual payload to sign. +// SignedData returns signed data of the Token. // // See also [Token.Sign], [Token.UnmarshalSignedData]. func (b *Token) SignedData() []byte { @@ -297,84 +292,77 @@ func (b *Token) SignedData() []byte { // UnmarshalSignedData is a reverse op to [Token.SignedData]. func (b *Token) UnmarshalSignedData(data []byte) error { - var body acl.BearerTokenBody - err := body.Unmarshal(data) + var body acl.BearerToken_Body + err := proto.Unmarshal(data, &body) if err != nil { return fmt.Errorf("decode body: %w", err) } - var tok acl.BearerToken - tok.SetBody(&body) - return b.readFromV2(tok, false) + return b.readFromV2(&acl.BearerToken{Body: &body}, false) } // VerifySignature checks if Token signature is presented and valid. // // Zero Token fails the check. // -// See also Sign. +// See also [Token.Sign]. func (b Token) VerifySignature() bool { - if !b.sigSet { - return false - } - - var sig neofscrypto.Signature - // TODO: (#233) check owner<->key relation - return sig.ReadFromV2(b.sig) == nil && sig.Verify(b.signedData()) + return b.sigSet && b.sig.Verify(b.signedData()) } // Marshal encodes Token into a binary format of the NeoFS API protocol // (Protocol Buffers V3 with direct field order). // -// See also Unmarshal. +// See also [Token.Unmarshal]. 
func (b Token) Marshal() []byte { var m acl.BearerToken b.WriteToV2(&m) - - return m.StableMarshal(nil) + bs := make([]byte, m.MarshaledSize()) + m.MarshalStable(bs) + return bs } -// Unmarshal decodes NeoFS API protocol binary data into the Token -// (Protocol Buffers V3 with direct field order). Returns an error describing -// a format violation. +// Unmarshal decodes Protocol Buffers V3 binary data into the Table. Returns an +// error describing a format violation of the specified fields. Unmarshal does +// not check presence of the required fields and, at the same time, checks +// format of presented fields. // -// See also Marshal. +// See also [Token.Marshal]. func (b *Token) Unmarshal(data []byte) error { var m acl.BearerToken - - err := m.Unmarshal(data) + err := proto.Unmarshal(data, &m) if err != nil { - return err + return fmt.Errorf("decode protobuf: %w", err) } - return b.readFromV2(m, false) + return b.readFromV2(&m, false) } // MarshalJSON encodes Token into a JSON format of the NeoFS API protocol // (Protocol Buffers V3 JSON). // -// See also UnmarshalJSON. +// See also [Token.UnmarshalJSON]. func (b Token) MarshalJSON() ([]byte, error) { var m acl.BearerToken b.WriteToV2(&m) - - return m.MarshalJSON() + return protojson.Marshal(&m) } -// UnmarshalJSON decodes NeoFS API protocol JSON data into the Token -// (Protocol Buffers V3 JSON). Returns an error describing a format violation. +// UnmarshalJSON decodes NeoFS API protocol JSON data into the Token (Protocol +// Buffers V3 JSON). Returns an error describing a format violation. +// UnmarshalJSON does not check presence of the required fields and, at the same +// time, checks format of presented fields. // -// See also MarshalJSON. +// See also [Table.MarshalJSON]. 
func (b *Token) UnmarshalJSON(data []byte) error { var m acl.BearerToken - - err := m.UnmarshalJSON(data) + err := protojson.Unmarshal(data, &m) if err != nil { - return err + return fmt.Errorf("decode protojson: %w", err) } - return b.readFromV2(m, false) + return b.readFromV2(&m, false) } // SigningKeyBytes returns issuer's public key in a binary format of @@ -391,7 +379,7 @@ func (b *Token) UnmarshalJSON(data []byte) error { // See also [Token.ResolveIssuer]. func (b Token) SigningKeyBytes() []byte { if b.sigSet { - return b.sig.GetKey() + return b.sig.PublicKeyBytes() } return nil @@ -431,20 +419,11 @@ func (b Token) ResolveIssuer() user.ID { binKey := b.SigningKeyBytes() if len(binKey) != 0 { - if err := idFromKey(&usr, binKey); err != nil { - usr = user.ID{} + var pk keys.PublicKey + if err := pk.DecodeBytes(binKey); err == nil { + usr = user.ResolveFromECDSAPublicKey(ecdsa.PublicKey(pk)) } } return usr } - -func idFromKey(id *user.ID, key []byte) error { - var pk keys.PublicKey - if err := pk.DecodeBytes(key); err != nil { - return fmt.Errorf("decode owner failed: %w", err) - } - - id.SetScriptHash(pk.GetScriptHash()) - return nil -} diff --git a/bearer/bearer_test.go b/bearer/bearer_test.go index 8acf5de9f..b64cfbf19 100644 --- a/bearer/bearer_test.go +++ b/bearer/bearer_test.go @@ -1,205 +1,476 @@ package bearer_test import ( - "bytes" - "math/rand" "testing" - "github.com/nspcc-dev/neofs-api-go/v2/acl" - "github.com/nspcc-dev/neofs-api-go/v2/refs" + apiacl "github.com/nspcc-dev/neofs-sdk-go/api/acl" "github.com/nspcc-dev/neofs-sdk-go/bearer" bearertest "github.com/nspcc-dev/neofs-sdk-go/bearer/test" - cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" - neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" - "github.com/nspcc-dev/neofs-sdk-go/crypto/test" - "github.com/nspcc-dev/neofs-sdk-go/eacl" eacltest "github.com/nspcc-dev/neofs-sdk-go/eacl/test" usertest "github.com/nspcc-dev/neofs-sdk-go/user/test" "github.com/stretchr/testify/require" 
+ "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" ) -// compares binary representations of two eacl.Table instances. -func isEqualEACLTables(t1, t2 eacl.Table) bool { - d1, err := t1.Marshal() - if err != nil { - panic(err) - } - - d2, err := t2.Marshal() - if err != nil { - panic(err) - } - - return bytes.Equal(d1, d2) -} - func TestToken_SetEACLTable(t *testing.T) { - var val bearer.Token - var m acl.BearerToken - filled := bearertest.Token(t) - - val.WriteToV2(&m) - require.Zero(t, m.GetBody()) - - val2 := filled - - require.NoError(t, val2.Unmarshal(val.Marshal())) - require.Zero(t, val2.EACLTable()) - - val2 = filled - - jd, err := val.MarshalJSON() - require.NoError(t, err) - - require.NoError(t, val2.UnmarshalJSON(jd)) - require.Zero(t, val2.EACLTable()) - - // set value - - eaclTable := eacltest.Table(t) - - val.SetEACLTable(eaclTable) - require.True(t, isEqualEACLTables(eaclTable, val.EACLTable())) - - val.WriteToV2(&m) - eaclTableV2 := eaclTable.ToV2() - require.Equal(t, eaclTableV2, m.GetBody().GetEACL()) - - val2 = filled - - require.NoError(t, val2.Unmarshal(val.Marshal())) - require.True(t, isEqualEACLTables(eaclTable, val.EACLTable())) - - val2 = filled - - jd, err = val.MarshalJSON() - require.NoError(t, err) - - require.NoError(t, val2.UnmarshalJSON(jd)) - require.True(t, isEqualEACLTables(eaclTable, val.EACLTable())) + var tok bearer.Token + + _, ok := tok.EACLTable() + require.False(t, ok) + + tbl := eacltest.Table() + tblOther := eacltest.Table() + + tok.SetEACLTable(tbl) + res, ok := tok.EACLTable() + require.True(t, ok) + require.Equal(t, tbl, res) + + tok.SetEACLTable(tblOther) + res, ok = tok.EACLTable() + require.True(t, ok) + require.Equal(t, tblOther, res) + + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst bearer.Token + + dst.SetEACLTable(tbl) + + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + _, ok = dst.EACLTable() + require.False(t, ok) 
+ + dst.SetEACLTable(tblOther) + src.SetEACLTable(tbl) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + res, ok = dst.EACLTable() + require.True(t, ok) + require.Equal(t, tbl, res) + }) + t.Run("api", func(t *testing.T) { + src := bearertest.Token() + var dst bearer.Token + var msg apiacl.BearerToken + + src.SetEACLTable(tbl) + src.WriteToV2(&msg) + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + res, ok = dst.EACLTable() + require.True(t, ok) + require.Equal(t, tbl, res) + }) + t.Run("json", func(t *testing.T) { + var src, dst bearer.Token + + dst.SetEACLTable(tbl) + + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + _, ok = dst.EACLTable() + require.False(t, ok) + + dst.SetEACLTable(tblOther) + src.SetEACLTable(tbl) + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + res, ok = dst.EACLTable() + require.True(t, ok) + require.Equal(t, tbl, res) + }) + }) } func TestToken_ForUser(t *testing.T) { - var val bearer.Token - var m acl.BearerToken - filled := bearertest.Token(t) - - val.WriteToV2(&m) - require.Zero(t, m.GetBody()) - - val2 := filled - - require.NoError(t, val2.Unmarshal(val.Marshal())) - - val2.WriteToV2(&m) - require.Zero(t, m.GetBody()) - - val2 = filled - - jd, err := val.MarshalJSON() - require.NoError(t, err) - - require.NoError(t, val2.UnmarshalJSON(jd)) - - val2.WriteToV2(&m) - require.Zero(t, m.GetBody()) - - // set value - usr := usertest.ID(t) + var tok bearer.Token - var usrV2 refs.OwnerID - usr.WriteToV2(&usrV2) + usr := usertest.ID() + otherUsr := usertest.ChangeID(usr) - val.ForUser(usr) + require.True(t, tok.AssertUser(usr)) + require.True(t, tok.AssertUser(otherUsr)) - val.WriteToV2(&m) - require.Equal(t, usrV2, *m.GetBody().GetOwnerID()) - - val2 = filled - - require.NoError(t, val2.Unmarshal(val.Marshal())) + tok.ForUser(usr) + require.True(t, tok.AssertUser(usr)) + require.False(t, 
tok.AssertUser(otherUsr)) - val2.WriteToV2(&m) - require.Equal(t, usrV2, *m.GetBody().GetOwnerID()) + tok.ForUser(otherUsr) + require.False(t, tok.AssertUser(usr)) + require.True(t, tok.AssertUser(otherUsr)) - val2 = filled + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst bearer.Token + var msg apiacl.BearerToken - jd, err = val.MarshalJSON() - require.NoError(t, err) + dst.ForUser(usr) - require.NoError(t, val2.UnmarshalJSON(jd)) + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.True(t, dst.AssertUser(otherUsr)) + dst.WriteToV2(&msg) + require.Nil(t, msg.GetBody().GetOwnerId()) - val2.WriteToV2(&m) - require.Equal(t, usrV2, *m.GetBody().GetOwnerID()) + dst.ForUser(otherUsr) + src.ForUser(usr) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.True(t, dst.AssertUser(usr)) + }) + t.Run("api", func(t *testing.T) { + src := bearertest.Token() + var dst bearer.Token + var msg apiacl.BearerToken + + src.ForUser(usr) + src.WriteToV2(&msg) + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + require.True(t, dst.AssertUser(usr)) + }) + t.Run("json", func(t *testing.T) { + var src, dst bearer.Token + var msg apiacl.BearerToken + + dst.ForUser(usr) + + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.True(t, dst.AssertUser(otherUsr)) + dst.WriteToV2(&msg) + require.Nil(t, msg.GetBody().GetOwnerId()) + + dst.ForUser(otherUsr) + src.ForUser(usr) + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.True(t, dst.AssertUser(usr)) + }) + }) } -func testLifetimeClaim(t *testing.T, setter func(*bearer.Token, uint64), getter func(*acl.BearerToken) uint64) { - var val bearer.Token - var m acl.BearerToken - filled := bearertest.Token(t) - - val.WriteToV2(&m) - require.Zero(t, m.GetBody()) - - val2 := filled - - require.NoError(t, 
val2.Unmarshal(val.Marshal())) - - val2.WriteToV2(&m) - require.Zero(t, m.GetBody()) - - val2 = filled - - jd, err := val.MarshalJSON() - require.NoError(t, err) - - require.NoError(t, val2.UnmarshalJSON(jd)) - - val2.WriteToV2(&m) - require.Zero(t, m.GetBody()) - - // set value - exp := rand.Uint64() +func TestToken_SetExp(t *testing.T) { + var tok bearer.Token + + require.True(t, tok.InvalidAt(13)) + require.True(t, tok.InvalidAt(14)) + require.True(t, tok.InvalidAt(15)) + + tok.SetExp(13) + require.False(t, tok.InvalidAt(13)) + require.True(t, tok.InvalidAt(14)) + require.True(t, tok.InvalidAt(15)) + + tok.SetExp(14) + require.False(t, tok.InvalidAt(13)) + require.False(t, tok.InvalidAt(14)) + require.True(t, tok.InvalidAt(15)) + + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst bearer.Token + var msg apiacl.BearerToken + + dst.SetExp(13) + + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.True(t, dst.InvalidAt(0)) + dst.WriteToV2(&msg) + require.Zero(t, msg.GetBody().GetLifetime().GetExp()) + + dst.SetExp(42) + src.SetExp(13) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.False(t, dst.InvalidAt(13)) + require.True(t, dst.InvalidAt(14)) + dst.WriteToV2(&msg) + require.EqualValues(t, 13, msg.Body.Lifetime.Exp) + }) + t.Run("api", func(t *testing.T) { + src := bearertest.Token() + src.SetNbf(0) + src.SetIat(0) + var dst bearer.Token + var msg apiacl.BearerToken + + src.SetExp(13) + src.WriteToV2(&msg) + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + require.False(t, dst.InvalidAt(13)) + require.True(t, dst.InvalidAt(14)) + require.EqualValues(t, 13, msg.Body.Lifetime.Exp) + }) + t.Run("json", func(t *testing.T) { + var src, dst bearer.Token + var msg apiacl.BearerToken + + dst.SetExp(13) + + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.True(t, dst.InvalidAt(0)) + dst.WriteToV2(&msg) + 
require.Zero(t, msg.GetBody().GetLifetime().GetExp()) + + dst.SetExp(42) + src.SetExp(13) + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.False(t, dst.InvalidAt(13)) + require.True(t, dst.InvalidAt(14)) + dst.WriteToV2(&msg) + require.EqualValues(t, 13, msg.Body.Lifetime.Exp) + }) + }) +} - setter(&val, exp) +func TestToken_SetIat(t *testing.T) { + var tok bearer.Token + + require.True(t, tok.InvalidAt(13)) + require.True(t, tok.InvalidAt(14)) + require.True(t, tok.InvalidAt(15)) + + tok.SetExp(15) + tok.SetIat(14) + require.True(t, tok.InvalidAt(13)) + require.False(t, tok.InvalidAt(14)) + require.False(t, tok.InvalidAt(15)) + + tok.SetIat(15) + require.True(t, tok.InvalidAt(13)) + require.True(t, tok.InvalidAt(14)) + require.False(t, tok.InvalidAt(15)) + + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst bearer.Token + var msg apiacl.BearerToken + + dst.SetExp(15) + + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.True(t, dst.InvalidAt(0)) + dst.WriteToV2(&msg) + require.Zero(t, msg.GetBody().GetLifetime().GetIat()) + + dst.SetIat(42) + src.SetExp(15) + src.SetIat(14) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.True(t, dst.InvalidAt(13)) + require.False(t, dst.InvalidAt(14)) + dst.WriteToV2(&msg) + require.EqualValues(t, 14, msg.Body.Lifetime.Iat) + }) + t.Run("api", func(t *testing.T) { + src := bearertest.Token() + src.SetNbf(0) + src.SetExp(15) + var dst bearer.Token + var msg apiacl.BearerToken + + src.SetIat(14) + src.WriteToV2(&msg) + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + require.True(t, dst.InvalidAt(13)) + require.False(t, dst.InvalidAt(14)) + require.EqualValues(t, 14, msg.Body.Lifetime.Iat) + }) + t.Run("json", func(t *testing.T) { + var src, dst bearer.Token + var msg apiacl.BearerToken + + dst.SetExp(15) + + j, err := src.MarshalJSON() + require.NoError(t, err) + err 
= dst.UnmarshalJSON(j) + require.NoError(t, err) + require.True(t, dst.InvalidAt(0)) + dst.WriteToV2(&msg) + require.Zero(t, msg.GetBody().GetLifetime().GetIat()) + + dst.SetIat(42) + src.SetExp(15) + src.SetIat(14) + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.True(t, dst.InvalidAt(13)) + require.False(t, dst.InvalidAt(14)) + dst.WriteToV2(&msg) + require.EqualValues(t, 14, msg.Body.Lifetime.Iat) + }) + }) +} - val.WriteToV2(&m) - require.Equal(t, exp, getter(&m)) +func TestToken_SetNbf(t *testing.T) { + var tok bearer.Token + + require.True(t, tok.InvalidAt(13)) + require.True(t, tok.InvalidAt(14)) + require.True(t, tok.InvalidAt(15)) + + tok.SetExp(15) + tok.SetNbf(14) + require.True(t, tok.InvalidAt(13)) + require.False(t, tok.InvalidAt(14)) + require.False(t, tok.InvalidAt(15)) + + tok.SetNbf(15) + require.True(t, tok.InvalidAt(13)) + require.True(t, tok.InvalidAt(14)) + require.False(t, tok.InvalidAt(15)) + + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst bearer.Token + var msg apiacl.BearerToken + + dst.SetExp(15) + + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.True(t, dst.InvalidAt(0)) + dst.WriteToV2(&msg) + require.Zero(t, msg.GetBody().GetLifetime().GetNbf()) + + dst.SetNbf(42) + src.SetExp(15) + src.SetNbf(14) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.True(t, dst.InvalidAt(13)) + require.False(t, dst.InvalidAt(14)) + dst.WriteToV2(&msg) + require.EqualValues(t, 14, msg.Body.Lifetime.Nbf) + }) + t.Run("api", func(t *testing.T) { + src := bearertest.Token() + src.SetIat(0) + src.SetExp(15) + var dst bearer.Token + var msg apiacl.BearerToken + + src.SetNbf(14) + src.WriteToV2(&msg) + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + require.True(t, dst.InvalidAt(13)) + require.False(t, dst.InvalidAt(14)) + require.EqualValues(t, 14, msg.Body.Lifetime.Nbf) + }) + 
t.Run("json", func(t *testing.T) { + var src, dst bearer.Token + var msg apiacl.BearerToken + + dst.SetExp(15) + + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.True(t, dst.InvalidAt(0)) + dst.WriteToV2(&msg) + require.Zero(t, msg.GetBody().GetLifetime().GetNbf()) + + dst.SetNbf(42) + src.SetExp(15) + src.SetNbf(14) + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.True(t, dst.InvalidAt(13)) + require.False(t, dst.InvalidAt(14)) + dst.WriteToV2(&msg) + require.EqualValues(t, 14, msg.Body.Lifetime.Nbf) + }) + }) +} - val2 = filled +func TestToken_Issuer(t *testing.T) { + var tok bearer.Token - require.NoError(t, val2.Unmarshal(val.Marshal())) + require.Zero(t, tok.Issuer()) - val2.WriteToV2(&m) - require.Equal(t, exp, getter(&m)) + usr := usertest.ID() + tok.SetIssuer(usr) + require.Equal(t, usr, tok.Issuer()) - val2 = filled + otherUsr := usertest.ChangeID(usr) + tok.SetIssuer(otherUsr) + require.Equal(t, otherUsr, tok.Issuer()) - jd, err = val.MarshalJSON() - require.NoError(t, err) + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst bearer.Token - require.NoError(t, val2.UnmarshalJSON(jd)) + dst.SetIssuer(usr) - val2.WriteToV2(&m) - require.Equal(t, exp, getter(&m)) -} + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.Zero(t, dst.Issuer()) -func TestToken_SetLifetime(t *testing.T) { - t.Run("iat", func(t *testing.T) { - testLifetimeClaim(t, (*bearer.Token).SetIat, func(token *acl.BearerToken) uint64 { - return token.GetBody().GetLifetime().GetIat() + dst.SetIssuer(otherUsr) + src.SetIssuer(usr) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.Equal(t, usr, dst.Issuer()) }) - }) - - t.Run("nbf", func(t *testing.T) { - testLifetimeClaim(t, (*bearer.Token).SetNbf, func(token *acl.BearerToken) uint64 { - return 
token.GetBody().GetLifetime().GetNbf() + t.Run("api", func(t *testing.T) { + src := bearertest.Token() + var dst bearer.Token + var msg apiacl.BearerToken + + src.SetIssuer(usr) + src.WriteToV2(&msg) + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + require.Equal(t, usr, dst.Issuer()) }) - }) - - t.Run("exp", func(t *testing.T) { - testLifetimeClaim(t, (*bearer.Token).SetExp, func(token *acl.BearerToken) uint64 { - return token.GetBody().GetLifetime().GetExp() + t.Run("json", func(t *testing.T) { + var src, dst bearer.Token + + dst.SetIssuer(usr) + + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.Zero(t, dst.Issuer()) + + dst.SetIssuer(otherUsr) + src.SetIssuer(usr) + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.Equal(t, usr, dst.Issuer()) }) }) } @@ -222,56 +493,26 @@ func TestToken_InvalidAt(t *testing.T) { require.True(t, val.InvalidAt(5)) } -func TestToken_AssertContainer(t *testing.T) { - var val bearer.Token - cnr := cidtest.ID() - - require.True(t, val.AssertContainer(cnr)) - - eaclTable := eacltest.Table(t) - - eaclTable.SetCID(cidtest.ID()) - val.SetEACLTable(eaclTable) - require.False(t, val.AssertContainer(cnr)) - - eaclTable.SetCID(cnr) - val.SetEACLTable(eaclTable) - require.True(t, val.AssertContainer(cnr)) -} - -func TestToken_AssertUser(t *testing.T) { - var val bearer.Token - usr := usertest.ID(t) - - require.True(t, val.AssertUser(usr)) - - val.ForUser(usertest.ID(t)) - require.False(t, val.AssertUser(usr)) - - val.ForUser(usr) - require.True(t, val.AssertUser(usr)) -} - func TestToken_Sign(t *testing.T) { var val bearer.Token require.False(t, val.VerifySignature()) - signer := test.RandomSignerRFC6979(t) + usr, _ := usertest.TwoUsers() - val = bearertest.Token(t) + val = bearertest.Token() - require.NoError(t, val.Sign(signer)) + require.NoError(t, val.Sign(usr)) require.True(t, 
val.VerifySignature()) - var m acl.BearerToken + var m apiacl.BearerToken val.WriteToV2(&m) require.NotZero(t, m.GetSignature().GetKey()) require.NotZero(t, m.GetSignature().GetSign()) - val2 := bearertest.Token(t) + val2 := bearertest.Token() require.NoError(t, val2.Unmarshal(val.Marshal())) require.True(t, val2.VerifySignature()) @@ -279,7 +520,7 @@ func TestToken_Sign(t *testing.T) { jd, err := val.MarshalJSON() require.NoError(t, err) - val2 = bearertest.Token(t) + val2 = bearertest.Token() require.NoError(t, val2.UnmarshalJSON(jd)) require.True(t, val2.VerifySignature()) } @@ -295,82 +536,118 @@ func TestToken_SignedData(t *testing.T) { require.Equal(t, val, dec) signer := test.RandomSignerRFC6979(t) - val = bearertest.Token(t) + val = bearertest.Token() val.SetIssuer(signer.UserID()) test.SignedDataComponentUser(t, signer, &val) } func TestToken_ReadFromV2(t *testing.T) { - var val bearer.Token - var m acl.BearerToken - - require.Error(t, val.ReadFromV2(m)) - - var body acl.BearerTokenBody - m.SetBody(&body) - - require.Error(t, val.ReadFromV2(m)) - - eaclTable := eacltest.Table(t) - eaclTableV2 := eaclTable.ToV2() - body.SetEACL(eaclTableV2) - - require.Error(t, val.ReadFromV2(m)) - - var lifetime acl.TokenLifetime - body.SetLifetime(&lifetime) - - require.Error(t, val.ReadFromV2(m)) - - const iat, nbf, exp = 1, 2, 3 - lifetime.SetIat(iat) - lifetime.SetNbf(nbf) - lifetime.SetExp(exp) - - body.SetLifetime(&lifetime) - - require.Error(t, val.ReadFromV2(m)) - - var sig refs.Signature - m.SetSignature(&sig) - - require.NoError(t, val.ReadFromV2(m)) - - var m2 acl.BearerToken - - val.WriteToV2(&m2) - require.Equal(t, m, m2) - - usr, usr2 := usertest.ID(t), usertest.ID(t) - - require.True(t, val.AssertUser(usr)) - require.True(t, val.AssertUser(usr2)) - - var usrV2 refs.OwnerID - usr.WriteToV2(&usrV2) - - body.SetOwnerID(&usrV2) - - require.NoError(t, val.ReadFromV2(m)) - - val.WriteToV2(&m2) - require.Equal(t, m, m2) - - require.True(t, val.AssertUser(usr)) - 
require.False(t, val.AssertUser(usr2)) - - signer := test.RandomSigner(t) - - var s neofscrypto.Signature - - require.NoError(t, s.CalculateMarshalled(signer, &body, nil)) - - s.WriteToV2(&sig) + t.Run("missing fields", func(t *testing.T) { + t.Run("signature", func(t *testing.T) { + tok := bearertest.Token() + var m apiacl.BearerToken + tok.WriteToV2(&m) + require.ErrorContains(t, tok.ReadFromV2(&m), "missing body signature") + }) + t.Run("body", func(t *testing.T) { + tok := bearertest.SignedToken(t) + var m apiacl.BearerToken + tok.WriteToV2(&m) + m.Body = nil + require.ErrorContains(t, tok.ReadFromV2(&m), "missing token body") + }) + t.Run("eACL", func(t *testing.T) { + tok := bearertest.SignedToken(t) + var m apiacl.BearerToken + tok.WriteToV2(&m) + m.Body.EaclTable = nil + require.ErrorContains(t, tok.ReadFromV2(&m), "missing eACL table") + }) + t.Run("lifetime", func(t *testing.T) { + tok := bearertest.SignedToken(t) + var m apiacl.BearerToken + tok.WriteToV2(&m) + m.Body.Lifetime = nil + require.ErrorContains(t, tok.ReadFromV2(&m), "missing token lifetime") + }) + }) + t.Run("invalid fields", func(t *testing.T) { + t.Run("target user", func(t *testing.T) { + tok := bearertest.SignedToken(t) + var m apiacl.BearerToken + tok.WriteToV2(&m) + + m.Body.OwnerId.Value = []byte("not_a_user") + require.ErrorContains(t, tok.ReadFromV2(&m), "invalid target user") + }) + t.Run("issuer", func(t *testing.T) { + tok := bearertest.SignedToken(t) + var m apiacl.BearerToken + tok.WriteToV2(&m) - require.NoError(t, val.ReadFromV2(m)) - require.True(t, val.VerifySignature()) - require.Equal(t, sig.GetKey(), val.SigningKeyBytes()) + m.Body.Issuer.Value = []byte("not_a_user") + require.ErrorContains(t, tok.ReadFromV2(&m), "invalid issuer") + }) + t.Run("signature", func(t *testing.T) { + t.Run("public key", func(t *testing.T) { + tok := bearertest.SignedToken(t) + var m apiacl.BearerToken + tok.WriteToV2(&m) + + m.Signature.Key = nil + require.ErrorContains(t, 
tok.ReadFromV2(&m), "invalid body signature: missing public key") + m.Signature.Key = []byte("not_a_key") + require.ErrorContains(t, tok.ReadFromV2(&m), "invalid body signature: decode public key from binary") + }) + t.Run("value", func(t *testing.T) { + tok := bearertest.SignedToken(t) + var m apiacl.BearerToken + tok.WriteToV2(&m) + + m.Signature.Sign = nil + require.ErrorContains(t, tok.ReadFromV2(&m), "invalid body signature: missing signature") + }) + }) + t.Run("eACL", func(t *testing.T) { + t.Run("records", func(t *testing.T) { + t.Run("targets", func(t *testing.T) { + rs := eacltest.NRecords(2) + rs[1].SetTargets(eacltest.NTargets(3)) + tbl := eacltest.Table() + tbl.SetRecords(rs) + tok := bearertest.SignedToken(t) + tok.SetEACLTable(tbl) + var m apiacl.BearerToken + tok.WriteToV2(&m) + + m.Body.EaclTable.Records[1].Targets[2].Role, m.Body.EaclTable.Records[1].Targets[2].Keys = 0, nil + err := tok.ReadFromV2(&m) + require.ErrorContains(t, err, "invalid eACL table: invalid record #1: invalid target #2: role and public keys are not mutually exclusive") + m.Body.EaclTable.Records[1].Targets[2].Role, m.Body.EaclTable.Records[1].Targets[2].Keys = 1, make([][]byte, 1) + err = tok.ReadFromV2(&m) + require.ErrorContains(t, err, "invalid eACL table: invalid record #1: invalid target #2: role and public keys are not mutually exclusive") + m.Body.EaclTable.Records[1].Targets = nil + err = tok.ReadFromV2(&m) + require.ErrorContains(t, err, "invalid eACL table: invalid record #1: missing target subjects") + }) + t.Run("filters", func(t *testing.T) { + rs := eacltest.NRecords(2) + rs[1].SetFilters(eacltest.NFilters(3)) + tbl := eacltest.Table() + tbl.SetRecords(rs) + tok := bearertest.SignedToken(t) + tok.SetEACLTable(tbl) + var m apiacl.BearerToken + tok.WriteToV2(&m) + + m.Body.EaclTable.Records[1].Filters[2].Key = "" + err := tok.ReadFromV2(&m) + require.ErrorContains(t, err, "invalid eACL table: invalid record #1: invalid filter #2: missing key") + }) + }) + }) + }) 
} func TestResolveIssuer(t *testing.T) { @@ -380,17 +657,6 @@ func TestResolveIssuer(t *testing.T) { require.Zero(t, val.ResolveIssuer()) - var m acl.BearerToken - - var sig refs.Signature - sig.SetKey([]byte("invalid key")) - - m.SetSignature(&sig) - - require.NoError(t, val.Unmarshal(m.StableMarshal(nil))) - - require.Zero(t, val.ResolveIssuer()) - require.NoError(t, val.Sign(signer)) usr := signer.UserID() @@ -399,58 +665,146 @@ func TestResolveIssuer(t *testing.T) { require.Equal(t, usr, val.Issuer()) } -func TestToken_Issuer(t *testing.T) { - var token bearer.Token - var msg acl.BearerToken - filled := bearertest.Token(t) - - token.WriteToV2(&msg) - require.Zero(t, msg.GetBody()) - - val2 := filled - require.NoError(t, val2.Unmarshal(token.Marshal())) - - val2.WriteToV2(&msg) - require.Zero(t, msg.GetBody()) - - val2 = filled - - jd, err := token.MarshalJSON() - require.NoError(t, err) - - require.NoError(t, val2.UnmarshalJSON(jd)) - - val2.WriteToV2(&msg) - require.Zero(t, msg.GetBody()) - - // set value - usr := usertest.ID(t) - - var usrV2 refs.OwnerID - usr.WriteToV2(&usrV2) - - token.SetIssuer(usr) - - require.True(t, usr.Equals(token.Issuer())) - require.True(t, usr.Equals(token.ResolveIssuer())) - - token.WriteToV2(&msg) - require.Equal(t, usrV2, *msg.GetBody().GetIssuer()) - - val2 = filled - - require.NoError(t, val2.Unmarshal(token.Marshal())) - - val2.WriteToV2(&msg) - require.Equal(t, usrV2, *msg.GetBody().GetIssuer()) - - val2 = filled - - jd, err = token.MarshalJSON() - require.NoError(t, err) - - require.NoError(t, val2.UnmarshalJSON(jd)) +func TestToken_Unmarshal(t *testing.T) { + t.Run("invalid binary", func(t *testing.T) { + var tok bearer.Token + msg := []byte("definitely_not_protobuf") + err := tok.Unmarshal(msg) + require.ErrorContains(t, err, "decode protobuf") + }) + t.Run("invalid fields", func(t *testing.T) { + t.Run("target user", func(t *testing.T) { + tok := bearertest.SignedToken(t) + var m apiacl.BearerToken + tok.WriteToV2(&m) + 
+ m.Body.OwnerId.Value = []byte("not_a_user") + b, err := proto.Marshal(&m) + require.NoError(t, err) + require.ErrorContains(t, tok.Unmarshal(b), "invalid target user") + }) + t.Run("issuer", func(t *testing.T) { + tok := bearertest.SignedToken(t) + var m apiacl.BearerToken + tok.WriteToV2(&m) + + m.Body.Issuer.Value = []byte("not_a_user") + b, err := proto.Marshal(&m) + require.NoError(t, err) + require.ErrorContains(t, tok.Unmarshal(b), "invalid issuer") + }) + t.Run("signature", func(t *testing.T) { + t.Run("public key", func(t *testing.T) { + tok := bearertest.SignedToken(t) + var m apiacl.BearerToken + tok.WriteToV2(&m) + + m.Signature.Key = nil + b, err := proto.Marshal(&m) + require.NoError(t, err) + require.ErrorContains(t, tok.Unmarshal(b), "invalid body signature: missing public key") + + m.Signature.Key = []byte("not_a_key") + b, err = proto.Marshal(&m) + require.NoError(t, err) + require.ErrorContains(t, tok.Unmarshal(b), "invalid body signature: decode public key from binary") + }) + t.Run("value", func(t *testing.T) { + tok := bearertest.SignedToken(t) + var m apiacl.BearerToken + tok.WriteToV2(&m) + + m.Signature.Sign = nil + b, err := proto.Marshal(&m) + require.NoError(t, err) + require.ErrorContains(t, tok.Unmarshal(b), "invalid body signature: missing signature") + }) + }) + t.Run("eACL", func(t *testing.T) { + t.Run("container", func(t *testing.T) { + tbl := eacltest.Table() + tok := bearertest.SignedToken(t) + tok.SetEACLTable(tbl) + var m apiacl.BearerToken + tok.WriteToV2(&m) + + m.Body.EaclTable.ContainerId.Value = []byte("not_a_container_ID") + b, err := proto.Marshal(&m) + require.NoError(t, err) + require.ErrorContains(t, tok.Unmarshal(b), "invalid eACL table: invalid container") + }) + }) + }) +} - val2.WriteToV2(&msg) - require.Equal(t, usrV2, *msg.GetBody().GetIssuer()) +func TestToken_UnmarshalJSON(t *testing.T) { + t.Run("invalid json", func(t *testing.T) { + var tok bearer.Token + msg := []byte("definitely_not_protojson") + err := 
tok.UnmarshalJSON(msg) + require.ErrorContains(t, err, "decode protojson") + }) + t.Run("invalid fields", func(t *testing.T) { + t.Run("target user", func(t *testing.T) { + tok := bearertest.SignedToken(t) + var m apiacl.BearerToken + tok.WriteToV2(&m) + + m.Body.OwnerId.Value = []byte("not_a_user") + b, err := protojson.Marshal(&m) + require.NoError(t, err) + require.ErrorContains(t, tok.UnmarshalJSON(b), "invalid target user") + }) + t.Run("issuer", func(t *testing.T) { + tok := bearertest.SignedToken(t) + var m apiacl.BearerToken + tok.WriteToV2(&m) + + m.Body.Issuer.Value = []byte("not_a_user") + b, err := protojson.Marshal(&m) + require.NoError(t, err) + require.ErrorContains(t, tok.UnmarshalJSON(b), "invalid issuer") + }) + t.Run("signature", func(t *testing.T) { + t.Run("public key", func(t *testing.T) { + tok := bearertest.SignedToken(t) + var m apiacl.BearerToken + tok.WriteToV2(&m) + + m.Signature.Key = nil + b, err := protojson.Marshal(&m) + require.NoError(t, err) + require.ErrorContains(t, tok.UnmarshalJSON(b), "invalid body signature: missing public key") + + m.Signature.Key = []byte("not_a_key") + b, err = protojson.Marshal(&m) + require.NoError(t, err) + require.ErrorContains(t, tok.UnmarshalJSON(b), "invalid body signature: decode public key from binary") + }) + t.Run("value", func(t *testing.T) { + tok := bearertest.SignedToken(t) + var m apiacl.BearerToken + tok.WriteToV2(&m) + + m.Signature.Sign = nil + b, err := protojson.Marshal(&m) + require.NoError(t, err) + require.ErrorContains(t, tok.UnmarshalJSON(b), "invalid body signature: missing signature") + }) + }) + t.Run("eACL", func(t *testing.T) { + t.Run("container", func(t *testing.T) { + tbl := eacltest.Table() + tok := bearertest.SignedToken(t) + tok.SetEACLTable(tbl) + var m apiacl.BearerToken + tok.WriteToV2(&m) + + m.Body.EaclTable.ContainerId.Value = []byte("not_a_container_ID") + b, err := protojson.Marshal(&m) + require.NoError(t, err) + require.ErrorContains(t, tok.UnmarshalJSON(b), 
"invalid eACL table: invalid container") + }) + }) + }) } diff --git a/bearer/example_test.go b/bearer/example_test.go index a56353b1b..b51ca539e 100644 --- a/bearer/example_test.go +++ b/bearer/example_test.go @@ -14,9 +14,6 @@ import ( // Define bearer token by setting correct lifetime, extended ACL and owner ID of // the user that will attach token to its requests. func Example() { - // import "github.com/nspcc-dev/neofs-sdk-go/eacl" - // import "github.com/nspcc-dev/neofs-sdk-go/user" - var bearerToken bearer.Token var ownerID user.ID var eaclTable eacl.Table @@ -28,8 +25,6 @@ func Example() { bearerToken.ForUser(ownerID) // Bearer token must be signed by owner of the container. - // import neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" - var signer user.Signer // signer initialization, bearerToken initialization, other steps ... @@ -39,11 +34,6 @@ func Example() { // Provide signed token in JSON or binary format to the request sender. Request // sender can attach this bearer token to the object service requests. func ExampleToken_attachToRequest() { - // import "github.com/nspcc-dev/neofs-sdk-go/client" - // import "github.com/nspcc-dev/neofs-sdk-go/user" - // import oid "github.com/nspcc-dev/neofs-sdk-go/object/id" - // import cid "github.com/nspcc-dev/neofs-sdk-go/container/id" - var bearerToken bearer.Token var sdkClient *client.Client var signer user.Signer diff --git a/bearer/test/generate.go b/bearer/test/generate.go index 9de117af3..ffe42c3e9 100644 --- a/bearer/test/generate.go +++ b/bearer/test/generate.go @@ -1,22 +1,32 @@ package bearertest import ( - "testing" + "fmt" + "math/rand" "github.com/nspcc-dev/neofs-sdk-go/bearer" eacltest "github.com/nspcc-dev/neofs-sdk-go/eacl/test" usertest "github.com/nspcc-dev/neofs-sdk-go/user/test" ) -// Token returns random bearer.Token. -// -// Resulting token is unsigned. 
-func Token(t testing.TB) (tok bearer.Token) { - tok.SetExp(3) - tok.SetNbf(2) - tok.SetIat(1) - tok.ForUser(usertest.ID(t)) - tok.SetEACLTable(eacltest.Table(t)) +// Token returns random bearer.Token. To get unsigned token, use [UnsignedToken]. +func Token() bearer.Token { + usr, _ := usertest.TwoUsers() + tok := UnsignedToken() + if err := tok.Sign(usr); err != nil { + panic(fmt.Errorf("unexpected sign failure: %w", err)) + } + return tok +} +// UnsignedToken returns random unsigned bearer.Token. To get signed token, use +// [Token]. +func UnsignedToken() bearer.Token { + var tok bearer.Token + tok.SetExp(uint64(rand.Int())) + tok.SetNbf(uint64(rand.Int())) + tok.SetIat(uint64(rand.Int())) + tok.ForUser(usertest.ID()) + tok.SetEACLTable(eacltest.Table()) return tok } diff --git a/bearer/test/generate_test.go b/bearer/test/generate_test.go new file mode 100644 index 000000000..2db793059 --- /dev/null +++ b/bearer/test/generate_test.go @@ -0,0 +1,42 @@ +package bearertest_test + +import ( + "testing" + + "github.com/nspcc-dev/neofs-sdk-go/api/acl" + "github.com/nspcc-dev/neofs-sdk-go/bearer" + bearertest "github.com/nspcc-dev/neofs-sdk-go/bearer/test" + "github.com/stretchr/testify/require" +) + +func TestUnsignedToken(t *testing.T) { + tok := bearertest.UnsignedToken() + require.False(t, tok.VerifySignature()) + require.NotEqual(t, tok, bearertest.UnsignedToken()) + + var tok2 bearer.Token + require.NoError(t, tok2.Unmarshal(tok.Marshal())) + require.Equal(t, tok, tok2) + + var m acl.BearerToken + tok.WriteToV2(&m) + var tok3 bearer.Token + require.NoError(t, tok3.ReadFromV2(&m)) + require.Equal(t, tok, tok3) +} + +func TestToken(t *testing.T) { + tok := bearertest.Token() + require.True(t, tok.VerifySignature()) + require.NotEqual(t, tok, bearertest.Token()) + + var tok2 bearer.Token + require.NoError(t, tok2.Unmarshal(tok.Marshal())) + require.Equal(t, tok, tok2) + + var m acl.BearerToken + tok.WriteToV2(&m) + var tok3 bearer.Token + require.NoError(t, 
tok3.ReadFromV2(&m)) + require.Equal(t, tok, tok3) +} diff --git a/checksum/checksum.go b/checksum/checksum.go index 3f55c1c71..44ad1841f 100644 --- a/checksum/checksum.go +++ b/checksum/checksum.go @@ -1,151 +1,150 @@ package checksum import ( + "bytes" "crypto/sha256" "encoding/hex" "errors" "fmt" + "hash" - "github.com/nspcc-dev/neofs-api-go/v2/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" "github.com/nspcc-dev/tzhash/tz" ) // Checksum represents checksum of some digital data. // -// Checksum is mutually compatible with github.com/nspcc-dev/neofs-api-go/v2/refs.Checksum -// message. See ReadFromV2 / WriteToV2 methods. +// Checksum is mutually compatible with [refs.Checksum] message. See +// [Checksum.ReadFromV2] / [Checksum.WriteToV2] methods. // -// Instances can be created using built-in var declaration. -// -// Note that direct typecast is not safe and may result in loss of compatibility: -// -// _ = Checksum(refs.Checksum{}) // not recommended -type Checksum refs.Checksum +// Instances should be created using one of the constructors. +type Checksum struct { + typ Type + val []byte +} -// Type represents the enumeration -// of checksum types. +// Type represents the enumeration of checksum types. type Type uint8 +// Supported Type values. const ( - // Unknown is an undefined checksum type. - Unknown Type = iota + _ Type = iota + SHA256 // SHA256 + TZ // Tillich-Zémor (homomorphic) +) - // SHA256 is a SHA256 checksum type. - SHA256 +// NewSHA256 constructs SHA256 checksum. +func NewSHA256(h [sha256.Size]byte) Checksum { + return Checksum{typ: SHA256, val: h[:]} +} - // TZ is a Tillich-Zémor checksum type. - TZ -) +// NewTZ constructs Tillich-Zémor homomorphic checksum. +func NewTZ(h [tz.Size]byte) Checksum { + return Checksum{typ: TZ, val: h[:]} +} -// ReadFromV2 reads Checksum from the refs.Checksum message. Checks if the -// message conforms to NeoFS API V2 protocol. +// NewFromHash allows to create Checksum instance from accumulated hash.Hash. 
It +// is the caller's responsibility to ensure that the hash matches the specified +// type. +func NewFromHash(t Type, h hash.Hash) Checksum { + return Checksum{typ: t, val: h.Sum(nil)} +} + +// CopyTo writes deep copy of the Checksum to dst. +func (c Checksum) CopyTo(dst *Checksum) { + dst.typ = c.typ + dst.val = bytes.Clone(c.val) +} + +// ReadFromV2 reads Checksum from the refs.Checksum message. Returns an error if +// the message is malformed according to the NeoFS API V2 protocol. The message +// must not be nil. +// +// ReadFromV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. // -// See also WriteToV2. -func (c *Checksum) ReadFromV2(m refs.Checksum) error { - if len(m.GetSum()) == 0 { +// See also [Checksum.WriteToV2]. +func (c *Checksum) ReadFromV2(m *refs.Checksum) error { + if len(m.Sum) == 0 { return errors.New("missing value") } - switch m.GetType() { + switch m.Type { default: - return fmt.Errorf("unsupported type %v", m.GetType()) - case refs.SHA256, refs.TillichZemor: + c.typ = Type(m.Type) + case refs.ChecksumType_SHA256: + c.typ = SHA256 + case refs.ChecksumType_TZ: + c.typ = TZ } - *c = Checksum(m) + c.val = m.Sum return nil } -// WriteToV2 writes Checksum to the refs.Checksum message. -// The message must not be nil. +// WriteToV2 writes Checksum to the refs.Checksum message of the NeoFS API +// protocol. // -// See also ReadFromV2. +// WriteToV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. +// +// See also [Checksum.ReadFromV2]. func (c Checksum) WriteToV2(m *refs.Checksum) { - *m = (refs.Checksum)(c) + switch c.typ { + default: + m.Type = refs.ChecksumType(c.typ) + case SHA256: + m.Type = refs.ChecksumType_SHA256 + case TZ: + m.Type = refs.ChecksumType_TZ + } + m.Sum = c.val } // Type returns checksum type. // -// Zero Checksum has Unknown checksum type. 
-// -// See also SetTillichZemor and SetSHA256. +// Zero Checksum is of zero type. func (c Checksum) Type() Type { - v2 := (refs.Checksum)(c) - switch v2.GetType() { - case refs.SHA256: - return SHA256 - case refs.TillichZemor: - return TZ - default: - return Unknown - } + return c.typ } -// Value returns checksum bytes. Return value -// MUST NOT be mutated. +// Value returns checksum bytes. // -// Zero Checksum has nil sum. +// Zero Checksum has nil value. // // The value returned shares memory with the structure itself, so changing it can lead to data corruption. // Make a copy if you need to change it. -// -// See also SetTillichZemor and SetSHA256. func (c Checksum) Value() []byte { - v2 := (refs.Checksum)(c) - return v2.GetSum() + return c.val } -// SetSHA256 sets checksum to SHA256 hash. -// -// See also Calculate. -func (c *Checksum) SetSHA256(v [sha256.Size]byte) { - v2 := (*refs.Checksum)(c) - - v2.SetType(refs.SHA256) - v2.SetSum(v[:]) -} - -// Calculate calculates checksum and sets it -// to the passed checksum. Checksum must not be nil. -// -// Does nothing if the passed type is not one of the: -// - SHA256; -// - TZ. +// Calculate calculates checksum of given type for passed data. Calculate panics +// on any unsupported type, use constants defined in these package only. // // Does not mutate the passed value. // -// See also SetSHA256, SetTillichZemor. -func Calculate(c *Checksum, t Type, v []byte) { - switch t { +// See also [NewSHA256], [NewTZ], [NewFromHash]. +func Calculate(typ Type, data []byte) Checksum { + switch typ { case SHA256: - c.SetSHA256(sha256.Sum256(v)) + return NewSHA256(sha256.Sum256(data)) case TZ: - c.SetTillichZemor(tz.Sum(v)) + return NewTZ(tz.Sum(data)) default: + panic(fmt.Errorf("unsupported checksum type %v", typ)) } } -// SetTillichZemor sets checksum to Tillich-Zémor hash. -// -// See also Calculate. 
-func (c *Checksum) SetTillichZemor(v [tz.Size]byte) { - v2 := (*refs.Checksum)(c) - - v2.SetType(refs.TillichZemor) - v2.SetSum(v[:]) -} - -// String implements fmt.Stringer. +// String implements [fmt.Stringer]. // // String is designed to be human-readable, and its format MAY differ between // SDK versions. func (c Checksum) String() string { - v2 := (refs.Checksum)(c) - return fmt.Sprintf("%s:%s", c.Type(), hex.EncodeToString(v2.GetSum())) + return fmt.Sprintf("%s:%s", c.typ, hex.EncodeToString(c.val)) } -// String implements fmt.Stringer. +// String implements [fmt.Stringer]. // // String is designed to be human-readable, and its format MAY differ between // SDK versions. @@ -154,11 +153,11 @@ func (m Type) String() string { switch m { default: - m2 = refs.UnknownChecksum + m2 = refs.ChecksumType(m) case TZ: - m2 = refs.TillichZemor + m2 = refs.ChecksumType_TZ case SHA256: - m2 = refs.SHA256 + m2 = refs.ChecksumType_SHA256 } return m2.String() diff --git a/checksum/checksum_test.go b/checksum/checksum_test.go index f9d22c953..0e2dfb236 100644 --- a/checksum/checksum_test.go +++ b/checksum/checksum_test.go @@ -1,80 +1,169 @@ -package checksum +package checksum_test import ( - "crypto/rand" + "bytes" "crypto/sha256" + "hash" + "hash/adler32" + "math/rand" "testing" - "github.com/nspcc-dev/neofs-api-go/v2/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + "github.com/nspcc-dev/neofs-sdk-go/checksum" + checksumtest "github.com/nspcc-dev/neofs-sdk-go/checksum/test" "github.com/nspcc-dev/tzhash/tz" "github.com/stretchr/testify/require" ) -func TestChecksum(t *testing.T) { - var c Checksum +func TestChecksum_CopyTo(t *testing.T) { + src := checksumtest.Checksum() + var dst checksum.Checksum + src.CopyTo(&dst) - cSHA256 := [sha256.Size]byte{} - _, _ = rand.Read(cSHA256[:]) + require.Equal(t, src.Value(), dst.Value()) + require.Equal(t, src.Type(), dst.Type()) - c.SetSHA256(cSHA256) - - require.Equal(t, SHA256, c.Type()) - require.Equal(t, cSHA256[:], c.Value()) - 
- var cV2 refs.Checksum - c.WriteToV2(&cV2) - - require.Equal(t, refs.SHA256, cV2.GetType()) - require.Equal(t, cSHA256[:], cV2.GetSum()) - - cTZ := [tz.Size]byte{} - _, _ = rand.Read(cSHA256[:]) - - c.SetTillichZemor(cTZ) - - require.Equal(t, TZ, c.Type()) - require.Equal(t, cTZ[:], c.Value()) - - c.WriteToV2(&cV2) - - require.Equal(t, refs.TillichZemor, cV2.GetType()) - require.Equal(t, cTZ[:], cV2.GetSum()) + originVal := src.Value() + originValCp := bytes.Clone(originVal) + originVal[0]++ + require.Equal(t, originVal, src.Value()) + require.Equal(t, originValCp, dst.Value()) } -func TestNewChecksum(t *testing.T) { - t.Run("default values", func(t *testing.T) { - var chs Checksum - - // check initial values - require.Equal(t, Unknown, chs.Type()) - require.Nil(t, chs.Value()) +func TestChecksumDecoding(t *testing.T) { + t.Run("missing fields", func(t *testing.T) { + for _, testCase := range []struct { + name, err string + corrupt func(*refs.Checksum) + }{ + {name: "value/nil", err: "missing value", corrupt: func(cs *refs.Checksum) { + cs.Sum = nil + }}, + {name: "value/empty", err: "missing value", corrupt: func(cs *refs.Checksum) { + cs.Sum = []byte{} + }}, + } { + t.Run(testCase.name, func(t *testing.T) { + var src, dst checksum.Checksum + var m refs.Checksum + + src.WriteToV2(&m) + testCase.corrupt(&m) + require.ErrorContains(t, dst.ReadFromV2(&m), testCase.err) + }) + } + }) +} - // convert to v2 message - var chsV2 refs.Checksum - chs.WriteToV2(&chsV2) +func TestNewSHA256(t *testing.T) { + var val [sha256.Size]byte + rand.Read(val[:]) + cs := checksum.NewSHA256(val) + require.Equal(t, checksum.SHA256, cs.Type()) + require.Equal(t, val[:], cs.Value()) + + otherVal := val + otherVal[0]++ + cs = checksum.NewSHA256(otherVal) + require.Equal(t, checksum.SHA256, cs.Type()) + require.Equal(t, otherVal[:], cs.Value()) + + t.Run("encoding", func(t *testing.T) { + t.Run("api", func(t *testing.T) { + src := checksum.NewSHA256(val) + var dst checksum.Checksum + var m 
refs.Checksum + + src.WriteToV2(&m) + require.Equal(t, refs.ChecksumType_SHA256, m.Type) + require.Equal(t, val[:], m.Sum) + require.NoError(t, dst.ReadFromV2(&m)) + require.Equal(t, checksum.SHA256, dst.Type()) + require.Equal(t, val[:], dst.Value()) + }) + }) +} - require.Equal(t, refs.UnknownChecksum, chsV2.GetType()) - require.Nil(t, chsV2.GetSum()) +func TestNewTZ(t *testing.T) { + var val [tz.Size]byte + rand.Read(val[:]) + cs := checksum.NewTZ(val) + require.Equal(t, checksum.TZ, cs.Type()) + require.Equal(t, val[:], cs.Value()) + + otherVal := val + otherVal[0]++ + cs = checksum.NewTZ(otherVal) + require.Equal(t, checksum.TZ, cs.Type()) + require.Equal(t, otherVal[:], cs.Value()) + + t.Run("encoding", func(t *testing.T) { + t.Run("api", func(t *testing.T) { + src := checksum.NewTZ(val) + var dst checksum.Checksum + var m refs.Checksum + + src.WriteToV2(&m) + require.Equal(t, refs.ChecksumType_TZ, m.Type) + require.Equal(t, val[:], m.Sum) + require.NoError(t, dst.ReadFromV2(&m)) + require.Equal(t, checksum.TZ, dst.Type()) + require.Equal(t, val[:], dst.Value()) + }) }) } -func TestCalculation(t *testing.T) { - var c Checksum - payload := []byte{0, 1, 2, 3, 4, 5} +func TestNewFromHash(t *testing.T) { + var h hash.Hash = adler32.New() // any hash just for example + h.Write([]byte("Hello, world!")) + hb := []byte{32, 94, 4, 138} + + typ := checksum.Type(rand.Uint32() % 256) + cs := checksum.NewFromHash(typ, h) + require.Equal(t, typ, cs.Type()) + require.Equal(t, hb, cs.Value()) + + t.Run("encoding", func(t *testing.T) { + t.Run("api", func(t *testing.T) { + src := checksum.NewFromHash(typ, h) + var dst checksum.Checksum + var m refs.Checksum + + src.WriteToV2(&m) + switch typ { + default: + require.EqualValues(t, typ, m.Type) + case checksum.TZ: + require.Equal(t, refs.ChecksumType_TZ, m.Type) + case checksum.SHA256: + require.Equal(t, refs.ChecksumType_SHA256, m.Type) + } + require.Equal(t, hb, m.Sum) + require.NoError(t, dst.ReadFromV2(&m)) + 
require.Equal(t, typ, dst.Type()) + require.Equal(t, hb, dst.Value()) + }) + }) +} - t.Run("SHA256", func(t *testing.T) { - orig := sha256.Sum256(payload) +func TestCalculate(t *testing.T) { + payload := []byte("Hello, world!") + hSHA256 := [32]byte{49, 95, 91, 219, 118, 208, 120, 196, 59, 138, 192, 6, 78, 74, 1, 100, 97, 43, 31, 206, 119, 200, 105, 52, 91, 252, 148, 199, 88, 148, 237, 211} + hTZ := [64]byte{0, 0, 1, 66, 73, 241, 7, 149, 192, 36, 14, 221, 202, 138, 110, 191, 0, 0, 1, 201, 196, 220, 152, 176, 23, 253, 146, 173, 98, 151, 156, 140, + 0, 0, 0, 141, 148, 205, 152, 164, 87, 185, 131, 233, 55, 131, 141, 205, 0, 0, 0, 219, 200, 104, 158, 117, 199, 221, 137, 37, 173, 13, 247, 39} - Calculate(&c, SHA256, payload) + require.Panics(t, func() { checksum.Calculate(0, []byte("any")) }) + require.Panics(t, func() { checksum.Calculate(checksum.TZ+1, []byte("any")) }) - require.Equal(t, orig[:], c.Value()) + t.Run("SHA256", func(t *testing.T) { + c := checksum.Calculate(checksum.SHA256, payload) + require.Equal(t, checksum.SHA256, c.Type()) + require.Equal(t, hSHA256[:], c.Value()) }) t.Run("TZ", func(t *testing.T) { - orig := tz.Sum(payload) - - Calculate(&c, TZ, payload) - - require.Equal(t, orig[:], c.Value()) + c := checksum.Calculate(checksum.TZ, payload) + require.Equal(t, checksum.TZ, c.Type()) + require.Equal(t, hTZ[:], c.Value()) }) } diff --git a/checksum/example_test.go b/checksum/example_test.go index 5a18a484e..00faa572b 100644 --- a/checksum/example_test.go +++ b/checksum/example_test.go @@ -6,42 +6,39 @@ import ( "fmt" "math/rand" - "github.com/nspcc-dev/neofs-api-go/v2/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" ) func ExampleCalculate() { payload := []byte{0, 1, 2, 3, 4, 5, 6} - var checksum Checksum // checksum contains SHA256 hash of the payload - Calculate(&checksum, SHA256, payload) + cs := Calculate(SHA256, payload) + fmt.Println(cs.Type(), cs.Value()) // checksum contains TZ hash of the payload - Calculate(&checksum, TZ, payload) + cs 
= Calculate(TZ, payload) + fmt.Println(cs.Type(), cs.Value()) } // Instances can be also used to process NeoFS API V2 protocol messages with [https://github.com/nspcc-dev/neofs-api] package. func ExampleChecksum_marshalling() { - var ( - csRaw [sha256.Size]byte - csV2 refs.Checksum - cs Checksum - ) - + var h [sha256.Size]byte //nolint:staticcheck - rand.Read(csRaw[:]) - cs.SetSHA256(csRaw) + rand.Read(h[:]) - // On the client side. + cs := NewSHA256(h) - cs.WriteToV2(&csV2) + // On the client side. + var msg refs.Checksum + cs.WriteToV2(&msg) - fmt.Println(bytes.Equal(cs.Value(), csV2.GetSum())) + fmt.Println(bytes.Equal(cs.Value(), msg.GetSum())) // Example output: true // *send message* // On the server side. - _ = cs.ReadFromV2(csV2) + _ = cs.ReadFromV2(&msg) } diff --git a/checksum/test/generate.go b/checksum/test/generate.go index 7a6f90674..78afa719c 100644 --- a/checksum/test/generate.go +++ b/checksum/test/generate.go @@ -1,21 +1,23 @@ package checksumtest import ( - "crypto/sha256" "math/rand" "github.com/nspcc-dev/neofs-sdk-go/checksum" ) -// Checksum returns random checksum.Checksum. -func Checksum() checksum.Checksum { - var cs [sha256.Size]byte - //nolint:staticcheck - rand.Read(cs[:]) +type fixedHash []byte - var x checksum.Checksum +func (x fixedHash) Sum([]byte) []byte { + return x +} - x.SetSHA256(cs) +func (x fixedHash) Write([]byte) (n int, err error) { panic("unexpected call") } +func (x fixedHash) Reset() { panic("unexpected call") } +func (x fixedHash) Size() int { panic("unexpected call") } +func (x fixedHash) BlockSize() int { panic("unexpected call") } - return x +// Checksum returns random checksum.Checksum. 
+func Checksum() checksum.Checksum { + return checksum.NewFromHash(checksum.Type(rand.Uint32()%256), fixedHash("Hello, world!")) } diff --git a/checksum/test/generate_test.go b/checksum/test/generate_test.go new file mode 100644 index 000000000..47063666a --- /dev/null +++ b/checksum/test/generate_test.go @@ -0,0 +1,21 @@ +package checksumtest_test + +import ( + "testing" + + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + "github.com/nspcc-dev/neofs-sdk-go/checksum" + checksumtest "github.com/nspcc-dev/neofs-sdk-go/checksum/test" + "github.com/stretchr/testify/require" +) + +func TestChecksum(t *testing.T) { + cs := checksumtest.Checksum() + require.NotEqual(t, cs, checksumtest.Checksum()) + + var m refs.Checksum + cs.WriteToV2(&m) + var cs2 checksum.Checksum + require.NoError(t, cs2.ReadFromV2(&m)) + require.Equal(t, cs, cs2) +} diff --git a/client/accounting.go b/client/accounting.go index da8b974bc..9fa55cfdb 100644 --- a/client/accounting.go +++ b/client/accounting.go @@ -2,104 +2,91 @@ package client import ( "context" + "errors" + "fmt" + "time" - v2accounting "github.com/nspcc-dev/neofs-api-go/v2/accounting" - "github.com/nspcc-dev/neofs-api-go/v2/refs" - rpcapi "github.com/nspcc-dev/neofs-api-go/v2/rpc" - "github.com/nspcc-dev/neofs-api-go/v2/rpc/client" "github.com/nspcc-dev/neofs-sdk-go/accounting" + apiaccounting "github.com/nspcc-dev/neofs-sdk-go/api/accounting" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" + neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" "github.com/nspcc-dev/neofs-sdk-go/stat" "github.com/nspcc-dev/neofs-sdk-go/user" ) -var ( - // special variable for test purposes, to overwrite real RPC calls. - rpcAPIBalance = rpcapi.Balance -) - -// PrmBalanceGet groups parameters of BalanceGet operation. -type PrmBalanceGet struct { - prmCommonMeta - - accountSet bool - account user.ID -} - -// SetAccount sets identifier of the NeoFS account for which the balance is requested. 
-// Required parameter. -func (x *PrmBalanceGet) SetAccount(id user.ID) { - x.account = id - x.accountSet = true -} +// GetBalanceOptions groups optional parameters of [Client.GetBalance]. +type GetBalanceOptions struct{} -// BalanceGet requests current balance of the NeoFS account. -// -// Any errors (local or remote, including returned status codes) are returned as Go errors, -// see [apistatus] package for NeoFS-specific error types. -// -// Context is required and must not be nil. It is used for network communication. -// -// Return errors: -// - [ErrMissingAccount] -func (c *Client) BalanceGet(ctx context.Context, prm PrmBalanceGet) (accounting.Decimal, error) { +// GetBalance requests current balance of the NeoFS account. +func (c *Client) GetBalance(ctx context.Context, usr user.ID, _ GetBalanceOptions) (accounting.Decimal, error) { + var res accounting.Decimal var err error - defer func() { - c.sendStatistic(stat.MethodBalanceGet, err)() - }() - - switch { - case !prm.accountSet: - err = ErrMissingAccount - return accounting.Decimal{}, err + if c.handleAPIOpResult != nil { + defer func(start time.Time) { + c.handleAPIOpResult(c.serverPubKey, c.endpoint, stat.MethodBalanceGet, time.Since(start), err) + }(time.Now()) } - // form request body - var accountV2 refs.OwnerID - prm.account.WriteToV2(&accountV2) - - var body v2accounting.BalanceRequestBody - body.SetOwnerID(&accountV2) - // form request - var req v2accounting.BalanceRequest - - req.SetBody(&body) - - // init call context - - var ( - cc contextCall - res accounting.Decimal - ) - - c.initCallContext(&cc) - cc.meta = prm.prmCommonMeta - cc.req = &req - cc.call = func() (responseV2, error) { - return rpcAPIBalance(&c.c, &req, client.WithContext(ctx)) + req := &apiaccounting.BalanceRequest{ + Body: &apiaccounting.BalanceRequest_Body{OwnerId: new(refs.OwnerID)}, + } + usr.WriteToV2(req.Body.OwnerId) + // FIXME: balance requests need small fixed-size buffers for encoding, its makes + // no sense to mosh 
them with other buffers + buf := c.signBuffers.Get().(*[]byte) + defer c.signBuffers.Put(buf) + if req.VerifyHeader, err = neofscrypto.SignRequest(c.signer, req, req.Body, *buf); err != nil { + err = fmt.Errorf("%v: %w", errSignRequest, err) // for closure above + return res, err } - cc.result = func(r responseV2) { - resp := r.(*v2accounting.BalanceResponse) - - const fieldBalance = "balance" - bal := resp.GetBody().GetBalance() - if bal == nil { - cc.err = newErrMissingResponseField(fieldBalance) - return - } + // send request + resp, err := c.transport.accounting.Balance(ctx, req) + if err != nil { + err = fmt.Errorf("%s: %w", errTransport, err) // for closure above + return res, err + } - cc.err = res.ReadFromV2(*bal) - if cc.err != nil { - cc.err = newErrInvalidResponseField(fieldBalance, cc.err) + // intercept response info + if c.interceptAPIRespInfo != nil { + if err = c.interceptAPIRespInfo(ResponseMetaInfo{ + key: resp.GetVerifyHeader().GetBodySignature().GetKey(), + epoch: resp.GetMetaHeader().GetEpoch(), + }); err != nil { + err = fmt.Errorf("%s: %w", errInterceptResponseInfo, err) // for closure above + return res, err } } - // process call - if !cc.processCall() { - err = cc.err - return accounting.Decimal{}, cc.err + // verify response integrity + if err = neofscrypto.VerifyResponse(resp, resp.Body); err != nil { + err = fmt.Errorf("%s: %w", errResponseSignature, err) // for closure above + return res, err + } + sts, err := apistatus.ErrorFromV2(resp.GetMetaHeader().GetStatus()) + if err != nil { + err = fmt.Errorf("%s: %w", errInvalidResponseStatus, err) // for closure above + return res, err + } + if sts != nil { + err = sts // for closure above + return res, err } + // decode response payload + if resp.Body == nil { + err = errors.New(errMissingResponseBody) // for closure above + return res, err + } + const fieldBalance = "balance" + if resp.Body.Balance == nil { + err = fmt.Errorf("%s (%s)", errMissingResponseBodyField, fieldBalance) // for 
closure above + return res, err + } else if err = res.ReadFromV2(resp.Body.Balance); err != nil { + err = fmt.Errorf("%s (%s): %w", errInvalidResponseBodyField, fieldBalance, err) // for closure above + return res, err + } return res, nil } diff --git a/client/accounting_test.go b/client/accounting_test.go index 377aa1ce7..ee11701a2 100644 --- a/client/accounting_test.go +++ b/client/accounting_test.go @@ -1,20 +1,384 @@ package client import ( + "bytes" "context" + "errors" + "fmt" + "net" "testing" + "time" + "github.com/nspcc-dev/neofs-sdk-go/accounting" + accountingtest "github.com/nspcc-dev/neofs-sdk-go/accounting/test" + apiaccounting "github.com/nspcc-dev/neofs-sdk-go/api/accounting" + apinetmap "github.com/nspcc-dev/neofs-sdk-go/api/netmap" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + apisession "github.com/nspcc-dev/neofs-sdk-go/api/session" + "github.com/nspcc-dev/neofs-sdk-go/api/status" + apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" + neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" + neofscryptotest "github.com/nspcc-dev/neofs-sdk-go/crypto/test" + netmaptest "github.com/nspcc-dev/neofs-sdk-go/netmap/test" + "github.com/nspcc-dev/neofs-sdk-go/stat" + "github.com/nspcc-dev/neofs-sdk-go/user" + usertest "github.com/nspcc-dev/neofs-sdk-go/user/test" + versiontest "github.com/nspcc-dev/neofs-sdk-go/version/test" "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/test/bufconn" ) -func TestClient_BalanceGet(t *testing.T) { - c := newClient(t, nil) +type noOtherAccountingCalls struct{} + +func (noOtherAccountingCalls) Balance(context.Context, *apiaccounting.BalanceRequest) (*apiaccounting.BalanceResponse, error) { + panic("must not be called") +} + +type getBalanceServer struct { + noOtherAccountingCalls + // client + usr user.ID + clientSigScheme neofscrypto.Scheme + clientPubKey []byte + // server + sleepDur time.Duration + endpointInfoOnDialServer + balance accounting.Decimal + errTransport 
error + modifyResp func(*apiaccounting.BalanceResponse) + corruptRespSig func(*apiaccounting.BalanceResponse) +} + +func (x getBalanceServer) Balance(ctx context.Context, req *apiaccounting.BalanceRequest) (*apiaccounting.BalanceResponse, error) { + if x.sleepDur > 0 { + time.Sleep(x.sleepDur) + } + if x.errTransport != nil { + return nil, x.errTransport + } + var sts status.Status + resp := apiaccounting.BalanceResponse{ + MetaHeader: &apisession.ResponseMetaHeader{Status: &sts, Epoch: x.epoch}, + } + var err error + var usr user.ID + if ctx == nil { + sts.Code, sts.Message = status.InternalServerError, "nil context" + } else if req == nil { + sts.Code, sts.Message = status.InternalServerError, "nil request" + } else if err = neofscrypto.VerifyRequest(req, req.Body); err != nil { + sts.Code, sts.Message = status.SignatureVerificationFail, err.Error() + } else if req.VerifyHeader.BodySignature.Scheme != refs.SignatureScheme(x.clientSigScheme) || + !bytes.Equal(req.VerifyHeader.BodySignature.Key, x.clientPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request body signature credentials" + } else if req.VerifyHeader.MetaSignature.Scheme != refs.SignatureScheme(x.clientSigScheme) || + !bytes.Equal(req.VerifyHeader.MetaSignature.Key, x.clientPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request meta header signature credentials" + } else if req.VerifyHeader.OriginSignature.Scheme != refs.SignatureScheme(x.clientSigScheme) || + !bytes.Equal(req.VerifyHeader.OriginSignature.Key, x.clientPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected origin request verification header signature credentials" + } else if req.MetaHeader != nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: meta header is set" + } else if req.Body == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: missing body" + } else if req.Body.OwnerId == nil 
{ + sts.Code, sts.Message = status.InternalServerError, "invalid request: invalid body: missing user" + } else if err = usr.ReadFromV2(req.Body.OwnerId); err != nil { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request: invalid body: invalid user field: %s", err) + } else if usr != x.usr { + sts.Code, sts.Message = status.InternalServerError, "[test] wrong user" + } else { + resp.MetaHeader.Status = nil + resp.Body = &apiaccounting.BalanceResponse_Body{Balance: new(apiaccounting.Decimal)} + x.balance.WriteToV2(resp.Body.Balance) + } + if x.modifyResp != nil { + x.modifyResp(&resp) + } + resp.VerifyHeader, err = neofscrypto.SignResponse(x.serverSigner, &resp, resp.Body, nil) + if err != nil { + return nil, fmt.Errorf("sign response: %w", err) + } + if x.corruptRespSig != nil { + x.corruptRespSig(&resp) + } + return &resp, nil +} + +func TestClient_GetBalance(t *testing.T) { ctx := context.Background() + var srv getBalanceServer + srv.sleepDur = 10 * time.Millisecond + srv.serverSigner = neofscryptotest.RandomSigner() + srv.latestVersion = versiontest.Version() + srv.nodeInfo = netmaptest.NodeInfo() + srv.nodeInfo.SetPublicKey(neofscrypto.PublicKeyBytes(srv.serverSigner.Public())) + srv.usr = usertest.ID() + srv.balance = accountingtest.Decimal() + _dial := func(t testing.TB, srv *getBalanceServer, assertErr func(error), customizeOpts func(*Options)) (*Client, *bool) { + var opts Options + var handlerCalled bool + opts.SetAPIRequestResultHandler(func(nodeKey []byte, endpoint string, op stat.Method, dur time.Duration, err error) { + handlerCalled = true + require.Equal(t, srv.nodeInfo.PublicKey(), nodeKey) + require.Equal(t, "localhost:8080", endpoint) + require.Equal(t, stat.MethodBalanceGet, op) + require.Greater(t, dur, srv.sleepDur) + assertErr(err) + }) + if customizeOpts != nil { + customizeOpts(&opts) + } + + c, err := New(anyValidURI, opts) + require.NoError(t, err) + srv.clientSigScheme = c.signer.Scheme() + srv.clientPubKey = 
neofscrypto.PublicKeyBytes(c.signer.Public()) + + conn := bufconn.Listen(10 << 10) + gs := grpc.NewServer() + apinetmap.RegisterNetmapServiceServer(gs, srv) + apiaccounting.RegisterAccountingServiceServer(gs, srv) + go func() { _ = gs.Serve(conn) }() + t.Cleanup(gs.Stop) - t.Run("missing", func(t *testing.T) { - t.Run("account", func(t *testing.T) { - _, err := c.BalanceGet(ctx, PrmBalanceGet{}) - require.ErrorIs(t, err, ErrMissingAccount) + c.dial = func(ctx context.Context, _ string) (net.Conn, error) { return conn.DialContext(ctx) } + require.NoError(t, c.Dial(ctx)) + + return c, &handlerCalled + } + dial := func(t testing.TB, srv *getBalanceServer, assertErr func(error)) (*Client, *bool) { + return _dial(t, srv, assertErr, nil) + } + t.Run("OK", func(t *testing.T) { + srv := srv + assertErr := func(err error) { require.NoError(t, err) } + c, handlerCalled := dial(t, &srv, assertErr) + res, err := c.GetBalance(ctx, srv.usr, GetBalanceOptions{}) + assertErr(err) + require.Equal(t, srv.balance, res) + require.True(t, *handlerCalled) + }) + t.Run("fail", func(t *testing.T) { + t.Run("sign request", func(t *testing.T) { + srv := srv + srv.sleepDur = 0 + assertErr := func(err error) { require.ErrorContains(t, err, errSignRequest) } + c, handlerCalled := dial(t, &srv, assertErr) + c.signer = neofscryptotest.FailSigner(c.signer) + _, err := c.GetBalance(ctx, srv.usr, GetBalanceOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("transport", func(t *testing.T) { + srv := srv + srv.errTransport = errors.New("any transport failure") + assertErr := func(err error) { + require.ErrorContains(t, err, errTransport) + require.ErrorContains(t, err, "any transport failure") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetBalance(ctx, srv.usr, GetBalanceOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("invalid response signature", func(t *testing.T) { + for i, testCase := range []struct { + err string + corrupt 
func(*apiaccounting.BalanceResponse) + }{ + {err: "missing verification header", + corrupt: func(r *apiaccounting.BalanceResponse) { r.VerifyHeader = nil }, + }, + {err: "missing body signature", + corrupt: func(r *apiaccounting.BalanceResponse) { r.VerifyHeader.BodySignature = nil }, + }, + {err: "missing signature of the meta header", + corrupt: func(r *apiaccounting.BalanceResponse) { r.VerifyHeader.MetaSignature = nil }, + }, + {err: "missing signature of the origin verification header", + corrupt: func(r *apiaccounting.BalanceResponse) { r.VerifyHeader.OriginSignature = nil }, + }, + {err: "verify body signature: missing public key", + corrupt: func(r *apiaccounting.BalanceResponse) { r.VerifyHeader.BodySignature.Key = nil }, + }, + {err: "verify signature of the meta header: missing public key", + corrupt: func(r *apiaccounting.BalanceResponse) { r.VerifyHeader.MetaSignature.Key = nil }, + }, + {err: "verify signature of the origin verification header: missing public key", + corrupt: func(r *apiaccounting.BalanceResponse) { r.VerifyHeader.OriginSignature.Key = nil }, + }, + {err: "verify body signature: decode public key from binary", + corrupt: func(r *apiaccounting.BalanceResponse) { + r.VerifyHeader.BodySignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the meta header: decode public key from binary", + corrupt: func(r *apiaccounting.BalanceResponse) { + r.VerifyHeader.MetaSignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the origin verification header: decode public key from binary", + corrupt: func(r *apiaccounting.BalanceResponse) { + r.VerifyHeader.OriginSignature.Key = []byte("not a public key") + }, + }, + {err: "verify body signature: invalid scheme -1", + corrupt: func(r *apiaccounting.BalanceResponse) { r.VerifyHeader.BodySignature.Scheme = -1 }, + }, + {err: "verify body signature: unsupported scheme 3", + corrupt: func(r *apiaccounting.BalanceResponse) { 
r.VerifyHeader.BodySignature.Scheme = 3 }, + }, + {err: "verify signature of the meta header: unsupported scheme 3", + corrupt: func(r *apiaccounting.BalanceResponse) { r.VerifyHeader.MetaSignature.Scheme = 3 }, + }, + {err: "verify signature of the origin verification header: unsupported scheme 3", + corrupt: func(r *apiaccounting.BalanceResponse) { r.VerifyHeader.OriginSignature.Scheme = 3 }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apiaccounting.BalanceResponse) { r.VerifyHeader.BodySignature.Sign[0]++ }, + }, + {err: "verify signature of the meta header: signature mismatch", + corrupt: func(r *apiaccounting.BalanceResponse) { r.VerifyHeader.MetaSignature.Sign[0]++ }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apiaccounting.BalanceResponse) { r.VerifyHeader.OriginSignature.Sign[0]++ }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apiaccounting.BalanceResponse) { + r.VerifyHeader.BodySignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the meta header: signature mismatch", + corrupt: func(r *apiaccounting.BalanceResponse) { + r.VerifyHeader.MetaSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apiaccounting.BalanceResponse) { + r.VerifyHeader.OriginSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + } { + srv := srv + srv.corruptRespSig = testCase.corrupt + assertErr := func(err error) { + require.ErrorContains(t, err, errResponseSignature, [2]any{i, testCase}) + require.ErrorContains(t, err, testCase.err, [2]any{i, testCase}) + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetBalance(ctx, srv.usr, GetBalanceOptions{}) + assertErr(err) + require.True(t, 
*handlerCalled) + } + }) + t.Run("invalid response status", func(t *testing.T) { + srv := srv + srv.modifyResp = func(r *apiaccounting.BalanceResponse) { + r.MetaHeader.Status = &status.Status{Code: status.InternalServerError, Details: make([]*status.Status_Detail, 1)} + } + assertErr := func(err error) { + require.ErrorContains(t, err, errInvalidResponseStatus) + require.ErrorContains(t, err, "details attached but not supported") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetBalance(ctx, srv.usr, GetBalanceOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("status errors", func(t *testing.T) { + for _, testCase := range []struct { + code uint32 + errConst error + errVar any + }{ + {code: 1 << 32 / 2}, + {code: status.InternalServerError, errConst: apistatus.ErrServerInternal, errVar: new(apistatus.InternalServerError)}, + {code: status.SignatureVerificationFail, errConst: apistatus.ErrSignatureVerification, errVar: new(apistatus.SignatureVerificationFailure)}, + } { + srv := srv + srv.modifyResp = func(r *apiaccounting.BalanceResponse) { + r.MetaHeader.Status = &status.Status{Code: testCase.code, Message: "any message"} + } + assertErr := func(err error) { + require.ErrorIs(t, err, apistatus.Error, testCase) + require.ErrorContains(t, err, "any message", testCase) + if testCase.errConst != nil { + require.ErrorIs(t, err, testCase.errConst, testCase) + } + if testCase.errVar != nil { + require.ErrorAs(t, err, testCase.errVar, testCase) + } + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetBalance(ctx, srv.usr, GetBalanceOptions{}) + assertErr(err) + require.True(t, *handlerCalled, testCase) + } + }) + t.Run("response body", func(t *testing.T) { + t.Run("missing", func(t *testing.T) { + srv := srv + assertErr := func(err error) { require.EqualError(t, err, "invalid response: missing body") } + c, handlerCalled := dial(t, &srv, assertErr) + srv.modifyResp = func(r *apiaccounting.BalanceResponse) { r.Body 
= nil } + _, err := c.GetBalance(ctx, srv.usr, GetBalanceOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("missing balance", func(t *testing.T) { + srv := srv + srv.modifyResp = func(r *apiaccounting.BalanceResponse) { r.Body.Balance = nil } + assertErr := func(err error) { + require.EqualError(t, err, "invalid response: invalid body: missing required field (balance)") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetBalance(ctx, srv.usr, GetBalanceOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + }) + }) + t.Run("response info handler", func(t *testing.T) { + t.Run("OK", func(t *testing.T) { + srv := srv + srv.epoch = 3598503 + assertErr := func(err error) { require.NoError(t, err) } + respHandlerCalled := false + c, reqHandlerCalled := _dial(t, &srv, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + respHandlerCalled = true + require.EqualValues(t, 3598503, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return nil + }) + }) + _, err := c.GetBalance(ctx, srv.usr, GetBalanceOptions{}) + assertErr(err) + require.True(t, respHandlerCalled) + require.True(t, *reqHandlerCalled) + }) + t.Run("fail", func(t *testing.T) { + srv := srv + srv.epoch = 4386380643 + assertErr := func(err error) { require.EqualError(t, err, "intercept response info: some handler error") } + respHandlerCalled := false + c, reqHandlerCalled := _dial(t, &srv, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + if !respHandlerCalled { // dial + respHandlerCalled = true + return nil + } + require.EqualValues(t, 4386380643, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return errors.New("some handler error") + }) + }) + _, err := c.GetBalance(ctx, srv.usr, GetBalanceOptions{}) + assertErr(err) + 
require.True(t, respHandlerCalled) + require.True(t, *reqHandlerCalled) }) }) } diff --git a/client/api.go b/client/api.go index 88f3d313c..4061b0fe0 100644 --- a/client/api.go +++ b/client/api.go @@ -1,53 +1,46 @@ package client import ( - "context" - "fmt" - - v2netmap "github.com/nspcc-dev/neofs-api-go/v2/netmap" - rpcapi "github.com/nspcc-dev/neofs-api-go/v2/rpc" - "github.com/nspcc-dev/neofs-api-go/v2/rpc/client" - "github.com/nspcc-dev/neofs-api-go/v2/session" + apiaccounting "github.com/nspcc-dev/neofs-sdk-go/api/accounting" + apicontainer "github.com/nspcc-dev/neofs-sdk-go/api/container" + apinetmap "github.com/nspcc-dev/neofs-sdk-go/api/netmap" + apiobject "github.com/nspcc-dev/neofs-sdk-go/api/object" + apireputation "github.com/nspcc-dev/neofs-sdk-go/api/reputation" + apisession "github.com/nspcc-dev/neofs-sdk-go/api/session" + "google.golang.org/grpc" ) +// cross-RPC non-status errors. var ( - // special variables for test purposes only, to overwrite real RPC calls. - rpcAPINetMapSnapshot = rpcapi.NetMapSnapshot - rpcAPICreateSession = rpcapi.CreateSession + errSignRequest = "sign request" + errTransport = "transport failure" + errInterceptResponseInfo = "intercept response info" + errResponseSignature = "verify response signature" + errInvalidResponse = "invalid response" + errInvalidResponseStatus = errInvalidResponse + ": invalid status" + errMissingResponseBody = errInvalidResponse + ": missing body" + errInvalidResponseBody = errInvalidResponse + ": invalid body" + errMissingResponseBodyField = errInvalidResponseBody + ": missing required field" + errInvalidResponseBodyField = errInvalidResponseBody + ": invalid field" ) -// interface of NeoFS API server. Exists for test purposes only. 
-type neoFSAPIServer interface { - createSession(cli *client.Client, req *session.CreateRequest, opts ...client.CallOption) (*session.CreateResponse, error) - - netMapSnapshot(context.Context, v2netmap.SnapshotRequest) (*v2netmap.SnapshotResponse, error) -} - -// wrapper over real client connection which communicates over NeoFS API protocol. -// Provides neoFSAPIServer for Client instances used in real applications. -type coreServer client.Client - -// unifies errors of all RPC. -func rpcErr(e error) error { - return fmt.Errorf("rpc failure: %w", e) +// unites all NeoFS services served over gRPC. +type grpcTransport struct { + accounting apiaccounting.AccountingServiceClient + container apicontainer.ContainerServiceClient + netmap apinetmap.NetmapServiceClient + object apiobject.ObjectServiceClient + reputation apireputation.ReputationServiceClient + session apisession.SessionServiceClient } -// executes NetmapService.NetmapSnapshot RPC declared in NeoFS API protocol -// using underlying client.Client. -func (x *coreServer) netMapSnapshot(ctx context.Context, req v2netmap.SnapshotRequest) (*v2netmap.SnapshotResponse, error) { - resp, err := rpcAPINetMapSnapshot((*client.Client)(x), &req, client.WithContext(ctx)) - if err != nil { - return nil, rpcErr(err) +func newGRPCTransport(con *grpc.ClientConn) grpcTransport { + return grpcTransport{ + accounting: apiaccounting.NewAccountingServiceClient(con), + container: apicontainer.NewContainerServiceClient(con), + netmap: apinetmap.NewNetmapServiceClient(con), + object: apiobject.NewObjectServiceClient(con), + reputation: apireputation.NewReputationServiceClient(con), + session: apisession.NewSessionServiceClient(con), } - - return resp, nil -} - -func (x *coreServer) createSession(cli *client.Client, req *session.CreateRequest, opts ...client.CallOption) (*session.CreateResponse, error) { - resp, err := rpcAPICreateSession(cli, req, opts...) 
- if err != nil { - return nil, rpcErr(err) - } - - return resp, nil } diff --git a/client/client.go b/client/client.go index f3f76bdae..11a223c9c 100644 --- a/client/client.go +++ b/client/client.go @@ -2,22 +2,21 @@ package client import ( "context" - "crypto/tls" "fmt" + "net" + "net/url" "sync" - "time" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neofs-api-go/v2/rpc/client" neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" neofsecdsa "github.com/nspcc-dev/neofs-sdk-go/crypto/ecdsa" "github.com/nspcc-dev/neofs-sdk-go/stat" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" ) -const ( - // max GRPC message size. - defaultBufferSize = 4194304 // 4MB -) +const defaultSignBufferSize = 4 << 20 // 4MB, max GRPC message size. // Client represents virtual connection to the NeoFS network to communicate // with NeoFS server using NeoFS API protocol. It is designed to provide @@ -49,242 +48,180 @@ const ( // // See client package overview to get some examples. type Client struct { - prm PrmInit + endpoint string + dial func(context.Context, string) (net.Conn, error) + withTLS bool - c client.Client + signer neofscrypto.Signer + signBuffers *sync.Pool - server neoFSAPIServer + interceptAPIRespInfo func(ResponseMetaInfo) error + handleAPIOpResult stat.OperationCallback - endpoint string - nodeKey []byte - - buffers *sync.Pool + // set on dial + conn *grpc.ClientConn + transport grpcTransport // based on conn + serverPubKey []byte } -// New creates an instance of Client initialized with the given parameters. -// -// See docs of [PrmInit] methods for details. See also [Client.Dial]/[Client.Close]. -func New(prm PrmInit) (*Client, error) { - var c = new(Client) - pk, err := keys.NewPrivateKey() +// parses s into a URI and returns host:port and a flag indicating enabled TLS. 
+func parseURI(s string) (string, bool, error) { + uri, err := url.ParseRequestURI(s) if err != nil { - return nil, fmt.Errorf("private key: %w", err) + return s, false, err } - prm.signer = neofsecdsa.SignerRFC6979(pk.PrivateKey) - - if prm.buffers != nil { - c.buffers = prm.buffers - } else { - size := prm.signMessageBufferSizes - if size == 0 { - size = defaultBufferSize + const grpcScheme = "grpc" + const grpcTLSScheme = "grpcs" + // check if passed string was parsed correctly + // URIs that do not start with a slash after the scheme are interpreted as: + // `scheme:opaque` => if `opaque` is not empty, then it is supposed that URI + // is in `host:port` format + if uri.Host == "" { + uri.Host = uri.Scheme + uri.Scheme = grpcScheme + if uri.Opaque != "" { + uri.Host = net.JoinHostPort(uri.Host, uri.Opaque) } + } - c.buffers = &sync.Pool{} - c.buffers.New = func() any { - b := make([]byte, size) - return &b - } + if uri.Scheme != grpcScheme && uri.Scheme != grpcTLSScheme { + return "", false, fmt.Errorf("unsupported URI scheme: %s", uri.Scheme) } - c.prm = prm - return c, nil + return uri.Host, uri.Scheme == grpcTLSScheme, nil } -// Dial establishes a connection to the server from the NeoFS network. -// Returns an error describing failure reason. If failed, the Client -// SHOULD NOT be used. -// -// Uses the context specified by SetContext if it was called with non-nil -// argument, otherwise context.Background() is used. Dial returns context -// errors, see context package docs for details. +// New initializes new Client to connect to NeoFS API server at the specified +// network endpoint with options. New does not dial the server: [Client.Dial] +// must be done after. Use [Dial] to open instant connection. // -// Panics if required parameters are set incorrectly, look carefully -// at the method documentation. +// URI format: // -// One-time method call during application start-up stage is expected. -// Calling multiple times leads to undefined behavior. 
+// [scheme://]host:port // -// Return client errors: -// - [ErrMissingServer] -// - [ErrNonPositiveTimeout] +// with one of supported schemes: // -// See also [Client.Close]. -func (c *Client) Dial(prm PrmDial) error { - if prm.endpoint == "" { - return ErrMissingServer - } - c.endpoint = prm.endpoint - - if prm.timeoutDialSet { - if prm.timeoutDial <= 0 { - return ErrNonPositiveTimeout - } - } else { - prm.timeoutDial = 5 * time.Second +// grpc +// grpcs +func New(uri string, opts Options) (*Client, error) { + endpoint, withTLS, err := parseURI(uri) + if err != nil { + return nil, fmt.Errorf("invalid URI: %w", err) } - if prm.streamTimeoutSet { - if prm.streamTimeout <= 0 { - return ErrNonPositiveTimeout - } - } else { - prm.streamTimeout = 10 * time.Second + pk, err := keys.NewPrivateKey() + if err != nil { + return nil, fmt.Errorf("randomize private key: %w", err) } - c.c = *client.New(append( - client.WithNetworkURIAddress(prm.endpoint, prm.tlsConfig), - client.WithDialTimeout(prm.timeoutDial), - client.WithRWTimeout(prm.streamTimeout), - )...) - - c.setNeoFSAPIServer((*coreServer)(&c.c)) + return &Client{ + endpoint: endpoint, + withTLS: withTLS, + signer: neofsecdsa.Signer(pk.PrivateKey), + signBuffers: &sync.Pool{New: func() any { + b := make([]byte, defaultSignBufferSize) + return &b + }}, + interceptAPIRespInfo: opts.interceptAPIRespInfo, + handleAPIOpResult: opts.handleAPIReqResult, + }, nil +} - if prm.parentCtx == nil { - prm.parentCtx = context.Background() +// Dial establishes connection to the NeoFS API server by its parameterized +// network URI and options. After use, the connection must be closed using +// [Client.Close]. If Dial fails, the Client must no longer be used. +// +// If operation result handler is specified, Dial also requests server info +// required for it. See [Options.SetOpResultHandler]. +// +// Dial does not modify context whose deadline may interrupt the connection. Use +// [context.WithTimeout] to prevent potential hangup. 
+func (c *Client) Dial(ctx context.Context) error { + var creds credentials.TransportCredentials + if c.withTLS { + creds = credentials.NewTLS(nil) + } else { + creds = insecure.NewCredentials() } - endpointInfo, err := c.EndpointInfo(prm.parentCtx, PrmEndpointInfo{}) + var err error + c.conn, err = grpc.DialContext(ctx, c.endpoint, + grpc.WithContextDialer(c.dial), + grpc.WithTransportCredentials(creds), + grpc.WithReturnConnectionError(), + grpc.FailOnNonTempDialError(true), + ) if err != nil { - return err + return fmt.Errorf("gRPC dial %s: %w", c.endpoint, err) } - c.nodeKey = endpointInfo.NodeInfo().PublicKey() + c.transport = newGRPCTransport(c.conn) + + if c.handleAPIOpResult != nil { + endpointInfo, err := c.GetEndpointInfo(ctx, GetEndpointInfoOptions{}) + if err != nil { + return fmt.Errorf("request node info from the server for stat tracking: %w", err) + } + c.serverPubKey = endpointInfo.Node.PublicKey() + } return nil } -// sets underlying provider of neoFSAPIServer. The method is used for testing as an approach -// to skip Dial stage and override NeoFS API server. MUST NOT be used outside test code. -// In real applications wrapper over github.com/nspcc-dev/neofs-api-go/v2/rpc/client -// is statically used. -func (c *Client) setNeoFSAPIServer(server neoFSAPIServer) { - c.server = server +// Dial connects to the NeoFS API server by its URI with options and returns +// ready-to-go Client. After use, the connection must be closed via +// [Client.Close]. If application needs delayed dial, use [New] + [Client.Dial] +// combo. +func Dial(ctx context.Context, uri string, opts Options) (*Client, error) { + c, err := New(uri, opts) + if err != nil { + return nil, err + } + return c, c.Dial(ctx) } -// Close closes underlying connection to the NeoFS server. Implements io.Closer. -// MUST NOT be called before successful Dial. 
Can be called concurrently -// with server operations processing on running goroutines: in this case -// they are likely to fail due to a connection error. +// Close closes underlying connection to the NeoFS server. Implements +// [io.Closer]. Close MUST NOT be called before successful [Client.Dial]. Close +// can be called concurrently with server operations processing on running +// goroutines: in this case they are likely to fail due to a connection error. // -// One-time method call during application shutdown stage (after [Client.Dial]) -// is expected. Calling multiple times leads to undefined behavior. -// -// See also [Client.Dial]. +// One-time method call during application shutdown stage is expected. Calling +// multiple times leads to undefined behavior. func (c *Client) Close() error { - return c.c.Conn().Close() + return c.conn.Close() } -func (c *Client) sendStatistic(m stat.Method, err error) func() { - if c.prm.statisticCallback == nil { - return func() {} - } - - ts := time.Now() - return func() { - c.prm.statisticCallback(c.nodeKey, c.endpoint, m, time.Since(ts), err) - } -} +// // SetSignMessageBuffers sets buffers which are using in GRPC message signing +// // process and helps to reduce memory allocations. +// func (x *Options) SetSignMessageBuffers(buffers *sync.Pool) { +// x.signBuffers = buffers +// } -// PrmInit groups initialization parameters of Client instances. +// Options groups optional Client parameters. // // See also [New]. -type PrmInit struct { - signer neofscrypto.Signer - - cbRespInfo func(ResponseMetaInfo) error - +type Options struct { netMagic uint64 - statisticCallback stat.OperationCallback + handleAPIReqResult stat.OperationCallback + interceptAPIRespInfo func(ResponseMetaInfo) error - signMessageBufferSizes uint64 - buffers *sync.Pool + signBuffersSize uint64 + signBuffers *sync.Pool } -// SetSignMessageBufferSizes sets single buffer size to the buffers pool inside client. 
-// This pool are using in GRPC message signing process and helps to reduce memory allocations. -func (x *PrmInit) SetSignMessageBufferSizes(size uint64) { - x.signMessageBufferSizes = size -} - -// SetSignMessageBuffers sets buffers which are using in GRPC message signing process and helps to reduce memory allocations. -func (x *PrmInit) SetSignMessageBuffers(buffers *sync.Pool) { - x.buffers = buffers -} - -// SetResponseInfoCallback makes the Client to pass ResponseMetaInfo from each -// NeoFS server response to f. Nil (default) means ignore response meta info. -func (x *PrmInit) SetResponseInfoCallback(f func(ResponseMetaInfo) error) { - x.cbRespInfo = f -} - -// SetStatisticCallback makes the Client to pass [stat.OperationCallback] for the external statistic. -func (x *PrmInit) SetStatisticCallback(statisticCallback stat.OperationCallback) { - x.statisticCallback = statisticCallback -} - -// PrmDial groups connection parameters for the Client. -// -// See also Dial. -type PrmDial struct { - endpoint string - - tlsConfig *tls.Config - - timeoutDialSet bool - timeoutDial time.Duration - - streamTimeoutSet bool - streamTimeout time.Duration - - parentCtx context.Context +// SetAPIResponseInfoInterceptor allows to intercept meta information from each +// NeoFS server response before its processing. If f returns an error, [Client] +// immediately returns it from the method. Nil (default) means ignore response +// meta info. +func (x *Options) SetAPIResponseInfoInterceptor(f func(ResponseMetaInfo) error) { + x.interceptAPIRespInfo = f } -// SetServerURI sets server URI in the NeoFS network. -// Required parameter. -// -// Format of the URI: -// -// [scheme://]host:port -// -// Supported schemes: -// -// grpc -// grpcs -// -// See also SetTLSConfig. -func (x *PrmDial) SetServerURI(endpoint string) { - x.endpoint = endpoint -} - -// SetTLSConfig sets tls.Config to open TLS client connection -// to the NeoFS server. Nil (default) means insecure connection. 
-// -// See also SetServerURI. -func (x *PrmDial) SetTLSConfig(tlsConfig *tls.Config) { - x.tlsConfig = tlsConfig -} - -// SetTimeout sets the timeout for connection to be established. -// MUST BE positive. If not called, 5s timeout will be used by default. -func (x *PrmDial) SetTimeout(timeout time.Duration) { - x.timeoutDialSet = true - x.timeoutDial = timeout -} - -// SetStreamTimeout sets the timeout for individual operations in streaming RPC. -// MUST BE positive. If not called, 10s timeout will be used by default. -func (x *PrmDial) SetStreamTimeout(timeout time.Duration) { - x.streamTimeoutSet = true - x.streamTimeout = timeout -} - -// SetContext allows to specify optional base context within which connection -// should be established. -// -// Context SHOULD NOT be nil. -func (x *PrmDial) SetContext(ctx context.Context) { - x.parentCtx = ctx +// SetAPIRequestResultHandler makes the [Client] to pass result of each +// performed NeoFS API operation to the specified handler. Nil (default) +// disables handling. +func (x *Options) SetAPIRequestResultHandler(f stat.OperationCallback) { + x.handleAPIReqResult = f } diff --git a/client/client_test.go b/client/client_test.go index 0f6e66e33..ee7b06270 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -1,74 +1,36 @@ package client import ( - "context" + "errors" "testing" - apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" "github.com/stretchr/testify/require" ) -/* -File contains common functionality used for client package testing. 
-*/ +const anyValidURI = "grpc://localhost:8080" -var statusErr apistatus.ServerInternal - -func init() { - statusErr.SetMessage("test status error") +type disposableSigner struct { + neofscrypto.Signer + signed bool } -func newClient(t *testing.T, server neoFSAPIServer) *Client { - var prm PrmInit - - c, err := New(prm) - require.NoError(t, err) - c.setNeoFSAPIServer(server) - - return c +// returns signer which uses s only once and then only fails. +func newDisposableSigner(s neofscrypto.Signer) neofscrypto.Signer { + return &disposableSigner{Signer: s} } -func TestClient_DialContext(t *testing.T) { - var prmInit PrmInit - - c, err := New(prmInit) - require.NoError(t, err) - - // try to connect to any host - var prm PrmDial - prm.SetServerURI("localhost:8080") - - assert := func(ctx context.Context, errExpected error) { - // use the particular context - prm.SetContext(ctx) - - // expect particular context error according to Dial docs - require.ErrorIs(t, c.Dial(prm), errExpected) +func (x *disposableSigner) Sign(data []byte) ([]byte, error) { + if x.signed { + return nil, errors.New("already signed") } - - // create pre-abandoned context - ctx, cancel := context.WithCancel(context.Background()) - cancel() - - assert(ctx, context.Canceled) - - // create "pre-deadlined" context - ctx, cancel = context.WithTimeout(context.Background(), 0) - defer cancel() - - assert(ctx, context.DeadlineExceeded) + x.signed = true + return x.Signer.Sign(data) } -type nopPublicKey struct{} - -func (x nopPublicKey) MaxEncodedSize() int { return 10 } -func (x nopPublicKey) Encode(buf []byte) int { return copy(buf, "public_key") } -func (x nopPublicKey) Decode([]byte) error { return nil } -func (x nopPublicKey) Verify(_, _ []byte) bool { return true } - -type nopSigner struct{} - -func (nopSigner) Scheme() neofscrypto.Scheme { return neofscrypto.ECDSA_SHA512 } -func (nopSigner) Sign([]byte) ([]byte, error) { return []byte("signature"), nil } -func (x nopSigner) Public() 
neofscrypto.PublicKey { return nopPublicKey{} } +func TestParseURI(t *testing.T) { + addr, isTLS, err := parseURI(anyValidURI) + require.NoError(t, err) + require.False(t, isTLS) + require.Equal(t, "localhost:8080", addr) +} diff --git a/client/common.go b/client/common.go index 5134b3400..2f40ae438 100644 --- a/client/common.go +++ b/client/common.go @@ -1,321 +1,310 @@ package client -import ( - "fmt" - - "github.com/nspcc-dev/neofs-api-go/v2/refs" - "github.com/nspcc-dev/neofs-api-go/v2/rpc/client" - v2session "github.com/nspcc-dev/neofs-api-go/v2/session" - apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" - neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" - "github.com/nspcc-dev/neofs-sdk-go/version" -) - -// Various field numbers in from NeoFS API definitions. -const ( - fieldNumSigPubKey = 1 - fieldNumSigVal = 2 - fieldNumSigScheme = 3 -) - -// groups meta parameters shared between all Client operations. -type prmCommonMeta struct { - // NeoFS request X-Headers - xHeaders []string -} - -// WithXHeaders specifies list of extended headers (string key-value pairs) -// to be attached to the request. Must have an even length. -// -// Slice must not be mutated until the operation completes. -func (x *prmCommonMeta) WithXHeaders(hs ...string) { - if len(hs)%2 != 0 { - panic("slice of X-Headers with odd length") - } - - x.xHeaders = hs -} - -func writeXHeadersToMeta(xHeaders []string, h *v2session.RequestMetaHeader) { - if len(xHeaders) == 0 { - return - } - - if len(xHeaders)%2 != 0 { - panic("slice of X-Headers with odd length") - } - - hs := make([]v2session.XHeader, len(xHeaders)/2) - j := 0 - - for i := 0; i < len(xHeaders); i += 2 { - hs[j].SetKey(xHeaders[i]) - hs[j].SetValue(xHeaders[i+1]) - j++ - } - - h.SetXHeaders(hs) -} - -// groups all the details required to send a single request and process a response to it. 
-type contextCall struct { - // ================================================== - // state vars that do not require explicit initialization - - // final error to be returned from client method - err error - - // received response - resp responseV2 - - // ================================================== - // shared parameters which are set uniformly on all calls - - // request signer - signer neofscrypto.Signer - - // callback prior to processing the response by the client - callbackResp func(ResponseMetaInfo) error - - // NeoFS network magic - netMagic uint64 - - // Meta parameters - meta prmCommonMeta - - // ================================================== - // custom call parameters - - // request to be signed with a signer and sent - req request - - // function to send a request (unary) and receive a response - call func() (responseV2, error) - - // function to send the request (req field) - wReq func() error - - // function to recv the response (resp field) - rResp func() error - - // function to close the message stream - closer func() error - - // function of writing response fields to the resulting structure (optional) - result func(v2 responseV2) - - buf []byte - bufCleanCallback func() -} - -type request interface { - GetMetaHeader() *v2session.RequestMetaHeader - SetMetaHeader(*v2session.RequestMetaHeader) - SetVerificationHeader(*v2session.RequestVerificationHeader) -} - -// sets needed fields of the request meta header. 
-func (x contextCall) prepareRequest() { - meta := x.req.GetMetaHeader() - if meta == nil { - meta = new(v2session.RequestMetaHeader) - x.req.SetMetaHeader(meta) - } - - if meta.GetTTL() == 0 { - meta.SetTTL(2) - } - - if meta.GetVersion() == nil { - var verV2 refs.Version - version.Current().WriteToV2(&verV2) - meta.SetVersion(&verV2) - } - - meta.SetNetworkMagic(x.netMagic) - - writeXHeadersToMeta(x.meta.xHeaders, meta) -} - -func (c *Client) prepareRequest(req request, meta *v2session.RequestMetaHeader) { - ttl := meta.GetTTL() - if ttl == 0 { - ttl = 2 - } - - verV2 := meta.GetVersion() - if verV2 == nil { - verV2 = new(refs.Version) - version.Current().WriteToV2(verV2) - } - - meta.SetTTL(ttl) - meta.SetVersion(verV2) - meta.SetNetworkMagic(c.prm.netMagic) - - req.SetMetaHeader(meta) -} - -// prepares, signs and writes the request. Result means success. -// If failed, contextCall.err contains the reason. -func (x *contextCall) writeRequest() bool { - x.prepareRequest() - - x.req.SetVerificationHeader(nil) - - // sign the request - x.err = signServiceMessage(x.signer, x.req, x.buf) - if x.err != nil { - x.err = fmt.Errorf("sign request: %w", x.err) - return false - } - - x.err = x.wReq() - if x.err != nil { - x.err = fmt.Errorf("write request: %w", x.err) - return false - } - - return true -} - -// performs common actions of response processing and writes any problem as a result status or client error -// (in both cases returns false). -// -// Actions: -// - verify signature (internal); -// - call response callback (internal); -// - unwrap status error (optional). 
-func (x *contextCall) processResponse() bool { - // call response callback if set - if x.callbackResp != nil { - x.err = x.callbackResp(ResponseMetaInfo{ - key: x.resp.GetVerificationHeader().GetBodySignature().GetKey(), - epoch: x.resp.GetMetaHeader().GetEpoch(), - }) - if x.err != nil { - x.err = fmt.Errorf("response callback error: %w", x.err) - return false - } - } - - // note that we call response callback before signature check since it is expected more lightweight - // while verification needs marshaling - - // verify response signature - x.err = verifyServiceMessage(x.resp) - if x.err != nil { - x.err = fmt.Errorf("invalid response signature: %w", x.err) - return false - } - - // get result status - x.err = apistatus.ErrorFromV2(x.resp.GetMetaHeader().GetStatus()) - return x.err == nil -} - -// processResponse verifies response signature. -func (c *Client) processResponse(resp responseV2) error { - if err := verifyServiceMessage(resp); err != nil { - return fmt.Errorf("invalid response signature: %w", err) - } - - return apistatus.ErrorFromV2(resp.GetMetaHeader().GetStatus()) -} - -// reads response (if rResp is set) and processes it. Result means success. -// If failed, contextCall.err contains the reason. -func (x *contextCall) readResponse() bool { - if x.rResp != nil { - x.err = x.rResp() - if x.err != nil { - x.err = fmt.Errorf("read response: %w", x.err) - return false - } - } - - return x.processResponse() -} - -// closes the message stream (if closer is set) and writes the results (if result is set). -// Return means success. If failed, contextCall.err contains the reason. -func (x *contextCall) close() bool { - if x.closer != nil { - x.err = x.closer() - if x.err != nil { - x.err = fmt.Errorf("close RPC: %w", x.err) - return false - } - } - - // write response to resulting structure - if x.result != nil { - x.result(x.resp) - } - - return x.err == nil -} - -// goes through all stages of sending a request and processing a response. 
Returns true if successful. -// If failed, contextCall.err contains the reason. -func (x *contextCall) processCall() bool { - // set request writer - x.wReq = func() error { - var err error - x.resp, err = x.call() - return err - } - - // write request - ok := x.writeRequest() - if x.bufCleanCallback != nil { - x.bufCleanCallback() - } - - if !ok { - return false - } - - // read response - ok = x.readResponse() - if !ok { - return x.err == nil - } - - // close and write response to resulting structure - ok = x.close() - if !ok { - return false - } - - return x.err == nil -} - -// initializes static cross-call parameters inherited from client. -func (c *Client) initCallContext(ctx *contextCall) { - ctx.signer = c.prm.signer - ctx.callbackResp = c.prm.cbRespInfo - ctx.netMagic = c.prm.netMagic - - buf := c.buffers.Get().(*[]byte) - ctx.buf = *buf - ctx.bufCleanCallback = func() { - c.buffers.Put(buf) - } -} - -// ExecRaw executes f with underlying github.com/nspcc-dev/neofs-api-go/v2/rpc/client.Client -// instance. Communicate over the Protocol Buffers protocol in a more flexible way: -// most often used to transmit data over a fixed version of the NeoFS protocol, as well -// as to support custom services. +// // Various field numbers in from NeoFS API definitions. +// const ( +// fieldNumSigPubKey = 1 +// fieldNumSigVal = 2 +// fieldNumSigScheme = 3 +// ) +// +// // groups meta parameters shared between all Client operations. +// type prmCommonMeta struct { +// // NeoFS request X-Headers +// xHeaders []string +// } +// +// // WithXHeaders specifies list of extended headers (string key-value pairs) +// // to be attached to the request. Must have an even length. +// // +// // Slice must not be mutated until the operation completes. 
+// func (x *prmCommonMeta) WithXHeaders(hs ...string) { +// if len(hs)%2 != 0 { +// panic("slice of X-Headers with odd length") +// } +// +// x.xHeaders = hs +// } +// +// func writeXHeadersToMeta(xHeaders []string, h *v2session.RequestMetaHeader) { +// if len(xHeaders) == 0 { +// return +// } +// +// if len(xHeaders)%2 != 0 { +// panic("slice of X-Headers with odd length") +// } +// +// hs := make([]v2session.XHeader, len(xHeaders)/2) +// j := 0 +// +// for i := 0; i < len(xHeaders); i += 2 { +// hs[j].SetKey(xHeaders[i]) +// hs[j].SetValue(xHeaders[i+1]) +// j++ +// } +// +// h.SetXHeaders(hs) +// } +// +// // groups all the details required to send a single request and process a response to it. +// type contextCall struct { +// // ================================================== +// // state vars that do not require explicit initialization +// +// // final error to be returned from client method +// err error +// +// // received response +// resp responseV2 +// +// // ================================================== +// // shared parameters which are set uniformly on all calls +// +// // request signer +// signer neofscrypto.Signer +// +// // callback prior to processing the response by the client +// callbackResp func(ResponseMetaInfo) error +// +// // NeoFS network magic +// netMagic uint64 +// +// // Meta parameters +// meta prmCommonMeta +// +// // ================================================== +// // custom call parameters +// +// // request to be signed with a signer and sent +// req request +// +// // function to send a request (unary) and receive a response +// call func() (responseV2, error) +// +// // function to send the request (req field) +// wReq func() error +// +// // function to recv the response (resp field) +// rResp func() error +// +// // function to close the message stream +// closer func() error +// +// // function of writing response fields to the resulting structure (optional) +// result func(v2 responseV2) +// +// buf []byte 
+// bufCleanCallback func() +// } +// +// type request interface { +// GetMetaHeader() *v2session.RequestMetaHeader +// SetMetaHeader(*v2session.RequestMetaHeader) +// SetVerificationHeader(*v2session.RequestVerificationHeader) +// } +// +// // sets needed fields of the request meta header. +// func (x contextCall) prepareRequest() { +// meta := x.req.GetMetaHeader() +// if meta == nil { +// meta = new(v2session.RequestMetaHeader) +// x.req.SetMetaHeader(meta) +// } +// +// if meta.GetTTL() == 0 { +// meta.SetTTL(2) +// } +// +// if meta.GetVersion() == nil { +// var verV2 refs.Version +// version.Current().WriteToV2(&verV2) +// meta.SetVersion(&verV2) +// } +// +// meta.SetNetworkMagic(x.netMagic) +// +// writeXHeadersToMeta(x.meta.xHeaders, meta) +// } +// +// func (c *Client) prepareRequest(req request, meta *v2session.RequestMetaHeader) { +// ttl := meta.GetTTL() +// if ttl == 0 { +// ttl = 2 +// } +// +// verV2 := meta.GetVersion() +// if verV2 == nil { +// verV2 = new(refs.Version) +// version.Current().WriteToV2(verV2) +// } +// +// meta.SetTTL(ttl) +// meta.SetVersion(verV2) +// meta.SetNetworkMagic(c.prm.netMagic) +// +// req.SetMetaHeader(meta) +// } +// +// // prepares, signs and writes the request. Result means success. +// // If failed, contextCall.err contains the reason. +// func (x *contextCall) writeRequest() bool { +// x.prepareRequest() +// +// x.req.SetVerificationHeader(nil) +// +// // sign the request +// x.err = signServiceMessage(x.signer, x.req, x.buf) +// if x.err != nil { +// x.err = fmt.Errorf("sign request: %w", x.err) +// return false +// } +// +// x.err = x.wReq() +// if x.err != nil { +// x.err = fmt.Errorf("write request: %w", x.err) +// return false +// } +// +// return true +// } +// +// // performs common actions of response processing and writes any problem as a result status or client error +// // (in both cases returns false). 
+// // +// // Actions: +// // - verify signature (internal); +// // - call response callback (internal); +// // - unwrap status error (optional). +// func (x *contextCall) processResponse() bool { +// // call response callback if set +// if x.callbackResp != nil { +// x.err = x.callbackResp(ResponseMetaInfo{ +// key: x.resp.GetVerificationHeader().GetBodySignature().GetKey(), +// epoch: x.resp.GetMetaHeader().GetEpoch(), +// }) +// if x.err != nil { +// x.err = fmt.Errorf("response callback error: %w", x.err) +// return false +// } +// } +// +// // note that we call response callback before signature check since it is expected more lightweight +// // while verification needs marshaling +// +// // verify response signature +// x.err = verifyServiceMessage(x.resp) +// if x.err != nil { +// x.err = fmt.Errorf("invalid response signature: %w", x.err) +// return false +// } +// +// // get result status +// x.err = apistatus.ErrorFromV2(x.resp.GetMetaHeader().GetStatus()) +// return x.err == nil +// } +// +// // processResponse verifies response signature. +// func (c *Client) processResponse(resp responseV2) error { +// if err := verifyServiceMessage(resp); err != nil { +// return fmt.Errorf("invalid response signature: %w", err) +// } +// +// return apistatus.ErrorFromV2(resp.GetMetaHeader().GetStatus()) +// } +// +// // reads response (if rResp is set) and processes it. Result means success. +// // If failed, contextCall.err contains the reason. +// func (x *contextCall) readResponse() bool { +// if x.rResp != nil { +// x.err = x.rResp() +// if x.err != nil { +// x.err = fmt.Errorf("read response: %w", x.err) +// return false +// } +// } +// +// return x.processResponse() +// } +// +// // closes the message stream (if closer is set) and writes the results (if result is set). +// // Return means success. If failed, contextCall.err contains the reason. 
+// func (x *contextCall) close() bool { +// if x.closer != nil { +// x.err = x.closer() +// if x.err != nil { +// x.err = fmt.Errorf("close RPC: %w", x.err) +// return false +// } +// } +// +// // write response to resulting structure +// if x.result != nil { +// x.result(x.resp) +// } +// +// return x.err == nil +// } +// +// // goes through all stages of sending a request and processing a response. Returns true if successful. +// // If failed, contextCall.err contains the reason. +// func (x *contextCall) processCall() bool { +// // set request writer +// x.wReq = func() error { +// var err error +// x.resp, err = x.call() +// return err +// } +// +// // write request +// ok := x.writeRequest() +// if x.bufCleanCallback != nil { +// x.bufCleanCallback() +// } +// +// if !ok { +// return false +// } +// +// // read response +// ok = x.readResponse() +// if !ok { +// return x.err == nil +// } +// +// // close and write response to resulting structure +// ok = x.close() +// if !ok { +// return false +// } +// +// return x.err == nil +// } // -// The f must not manipulate the client connection passed into it. +// // initializes static cross-call parameters inherited from client. +// func (c *Client) initCallContext(ctx *contextCall) { +// ctx.signer = c.prm.signer +// ctx.callbackResp = c.prm.cbRespInfo +// ctx.netMagic = c.prm.netMagic // -// Like all other operations, must be called after connecting to the server and -// before closing the connection. +// buf := c.buffers.Get().(*[]byte) +// ctx.buf = *buf +// ctx.bufCleanCallback = func() { +// c.buffers.Put(buf) +// } +// } // -// See also Dial and Close. -// See also github.com/nspcc-dev/neofs-api-go/v2/rpc/client package docs. -func (c *Client) ExecRaw(f func(client *client.Client) error) error { - return f(&c.c) -} +// // ExecRaw executes f with underlying github.com/nspcc-dev/neofs-api-go/v2/rpc/client.Client +// // instance. 
Communicate over the Protocol Buffers protocol in a more flexible way: +// // most often used to transmit data over a fixed version of the NeoFS protocol, as well +// // as to support custom services. +// // +// // The f must not manipulate the client connection passed into it. +// // +// // Like all other operations, must be called after connecting to the server and +// // before closing the connection. +// // +// // See also Dial and Close. +// // See also github.com/nspcc-dev/neofs-api-go/v2/rpc/client package docs. +// func (c *Client) ExecRaw(f func(client *client.Client) error) error { +// return f(&c.c) +// } diff --git a/client/container.go b/client/container.go index ba3a8f3a6..8c827b1e0 100644 --- a/client/container.go +++ b/client/container.go @@ -4,12 +4,13 @@ import ( "context" "errors" "fmt" + "time" - v2container "github.com/nspcc-dev/neofs-api-go/v2/container" - "github.com/nspcc-dev/neofs-api-go/v2/refs" - rpcapi "github.com/nspcc-dev/neofs-api-go/v2/rpc" - "github.com/nspcc-dev/neofs-api-go/v2/rpc/client" - v2session "github.com/nspcc-dev/neofs-api-go/v2/session" + apiacl "github.com/nspcc-dev/neofs-sdk-go/api/acl" + apicontainer "github.com/nspcc-dev/neofs-sdk-go/api/container" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + apisession "github.com/nspcc-dev/neofs-sdk-go/api/session" + apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" "github.com/nspcc-dev/neofs-sdk-go/container" cid "github.com/nspcc-dev/neofs-sdk-go/container/id" neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" @@ -19,644 +20,631 @@ import ( "github.com/nspcc-dev/neofs-sdk-go/user" ) -var ( - // special variables for test purposes, to overwrite real RPC calls. 
- rpcAPIPutContainer = rpcapi.PutContainer - rpcAPIGetContainer = rpcapi.GetContainer - rpcAPIListContainers = rpcapi.ListContainers - rpcAPIDeleteContainer = rpcapi.DeleteContainer - rpcAPIGetEACL = rpcapi.GetEACL - rpcAPISetEACL = rpcapi.SetEACL - rpcAPIAnnounceUsedSpace = rpcapi.AnnounceUsedSpace -) - -// PrmContainerPut groups optional parameters of ContainerPut operation. -type PrmContainerPut struct { - prmCommonMeta - +// PutContainerOptions groups optional parameters of [Client.PutContainer]. +type PutContainerOptions struct { sessionSet bool session session.Container } // WithinSession specifies session within which container should be saved. -// -// Creator of the session acquires the authorship of the request. This affects -// the execution of an operation (e.g. access control). -// -// Session is optional, if set the following requirements apply: -// - session operation MUST be session.VerbContainerPut (ForVerb) -// - token MUST be signed using private signer of the owner of the container to be saved -func (x *PrmContainerPut) WithinSession(s session.Container) { +// Session tokens grant user-to-user power of attorney: the subject can create +// containers on behalf of the issuer. Session op must be +// [session.VerbContainerPut]. If used, [Client.PutContainer] default ownership +// behavior is replaced with: +// - session issuer becomes the container's owner; +// - session must target the subject authenticated by signer passed to +// [Client.PutContainer]. +func (x *PutContainerOptions) WithinSession(s session.Container) { x.session = s x.sessionSet = true } -// ContainerPut sends request to save container in NeoFS. -// -// Any errors (local or remote, including returned status codes) are returned as Go errors, -// see [apistatus] package for NeoFS-specific error types. -// -// Operation is asynchronous and no guaranteed even in the absence of errors. -// The required time is also not predictable. 
-// -// Success can be verified by reading [Client.ContainerGet] using the returned -// identifier (notice that it needs some time to succeed). -// -// Context is required and must not be nil. It is used for network communication. -// -// Signer is required and must not be nil. The account corresponding to the specified Signer will be charged for the operation. -// Signer's scheme MUST be neofscrypto.ECDSA_DETERMINISTIC_SHA256. For example, you can use neofsecdsa.SignerRFC6979. -// -// Return errors: -// - [ErrMissingSigner] -func (c *Client) ContainerPut(ctx context.Context, cont container.Container, signer neofscrypto.Signer, prm PrmContainerPut) (cid.ID, error) { - var err error - defer func() { - c.sendStatistic(stat.MethodContainerPut, err)() - }() - +// PutContainer sends request to save given container in NeoFS. If the request +// is accepted, PutContainer returns no error and ID of the new container which +// is going to be saved asynchronously. The completion can be checked by polling +// the container presence using returned ID until it will appear (e.g. call +// [Client.GetContainer] until no error). +// +// Signer must authenticate container's owner. The signature scheme MUST be +// [neofscrypto.ECDSA_DETERMINISTIC_SHA256] (e.g. [neofsecdsa.SignerRFC6979]). +// Owner's NeoFS account can be charged for the operation. 
+func (c *Client) PutContainer(ctx context.Context, cnr container.Container, signer neofscrypto.Signer, opts PutContainerOptions) (cid.ID, error) { + var res cid.ID if signer == nil { - return cid.ID{}, ErrMissingSigner + return res, errMissingSigner + } else if scheme := signer.Scheme(); scheme != neofscrypto.ECDSA_DETERMINISTIC_SHA256 { + return res, fmt.Errorf("wrong signature scheme: %v instead of %v", scheme, neofscrypto.ECDSA_DETERMINISTIC_SHA256) } - var cnr v2container.Container - cont.WriteToV2(&cnr) - - var sig neofscrypto.Signature - err = cont.CalculateSignature(&sig, signer) - if err != nil { - err = fmt.Errorf("calculate container signature: %w", err) - return cid.ID{}, err + var err error + if c.handleAPIOpResult != nil { + defer func(start time.Time) { + c.handleAPIOpResult(c.serverPubKey, c.endpoint, stat.MethodContainerPut, time.Since(start), err) + }(time.Now()) } - var sigv2 refs.Signature - - sig.WriteToV2(&sigv2) - - // form request body - reqBody := new(v2container.PutRequestBody) - reqBody.SetContainer(&cnr) - reqBody.SetSignature(&sigv2) - - // form meta header - var meta v2session.RequestMetaHeader - writeXHeadersToMeta(prm.prmCommonMeta.xHeaders, &meta) - - if prm.sessionSet { - var tokv2 v2session.Token - prm.session.WriteToV2(&tokv2) - - meta.SetSessionToken(&tokv2) + sig, err := signer.Sign(cnr.Marshal()) + if err != nil { + err = fmt.Errorf("sign container: %w", err) // for closure above + return res, err } // form request - var req v2container.PutRequest - - req.SetBody(reqBody) - req.SetMetaHeader(&meta) - - // init call context - - var ( - cc contextCall - res cid.ID - ) - - c.initCallContext(&cc) - cc.req = &req - cc.call = func() (responseV2, error) { - return rpcAPIPutContainer(&c.c, &req, client.WithContext(ctx)) - } - cc.result = func(r responseV2) { - resp := r.(*v2container.PutResponse) - - const fieldCnrID = "container ID" - - cidV2 := resp.GetBody().GetContainerID() - if cidV2 == nil { - cc.err = 
newErrMissingResponseField(fieldCnrID) - return - } - - cc.err = res.ReadFromV2(*cidV2) - if cc.err != nil { - cc.err = newErrInvalidResponseField(fieldCnrID, cc.err) + req := &apicontainer.PutRequest{ + Body: &apicontainer.PutRequest_Body{ + Container: new(apicontainer.Container), + Signature: &refs.SignatureRFC6979{Key: neofscrypto.PublicKeyBytes(signer.Public()), Sign: sig}, + }, + } + cnr.WriteToV2(req.Body.Container) + if opts.sessionSet { + req.MetaHeader = &apisession.RequestMetaHeader{SessionToken: new(apisession.SessionToken)} + opts.session.WriteToV2(req.MetaHeader.SessionToken) + } + // FIXME: balance requests need small fixed-size buffers for encoding, its makes + // no sense to mosh them with other buffers + buf := c.signBuffers.Get().(*[]byte) + defer c.signBuffers.Put(buf) + if req.VerifyHeader, err = neofscrypto.SignRequest(signer, req, req.Body, *buf); err != nil { + err = fmt.Errorf("%s: %w", errSignRequest, err) // for closure above + return res, err + } + + // send request + resp, err := c.transport.container.Put(ctx, req) + if err != nil { + err = fmt.Errorf("%s: %w", errTransport, err) // for closure above + return res, err + } + + // intercept response info + if c.interceptAPIRespInfo != nil { + if err = c.interceptAPIRespInfo(ResponseMetaInfo{ + key: resp.GetVerifyHeader().GetBodySignature().GetKey(), + epoch: resp.GetMetaHeader().GetEpoch(), + }); err != nil { + err = fmt.Errorf("%s: %w", errInterceptResponseInfo, err) // for closure above + return res, err } } - // process call - if !cc.processCall() { - err = cc.err - return cid.ID{}, cc.err + // verify response integrity + if err = neofscrypto.VerifyResponse(resp, resp.Body); err != nil { + err = fmt.Errorf("%s: %w", errResponseSignature, err) // for closure above + return res, err + } + sts, err := apistatus.ErrorFromV2(resp.GetMetaHeader().GetStatus()) + if err != nil { + err = fmt.Errorf("%s: %w", errInvalidResponseStatus, err) // for closure above + return res, err + } + if sts != 
nil { + err = sts // for closure above + return res, err } + // decode response payload + if resp.Body == nil { + err = errors.New(errMissingResponseBody) // for closure above + return res, err + } + const fieldID = "ID" + if resp.Body.ContainerId == nil { + err = fmt.Errorf("%s (%s)", errMissingResponseBodyField, fieldID) // for closure above + return res, err + } else if err = res.ReadFromV2(resp.Body.ContainerId); err != nil { + err = fmt.Errorf("%s (%s): %w", errInvalidResponseBodyField, fieldID, err) // for closure above + return res, err + } return res, nil } -// PrmContainerGet groups optional parameters of ContainerGet operation. -type PrmContainerGet struct { - prmCommonMeta -} +// GetContainerOptions groups optional parameters of [Client.GetContainer]. +type GetContainerOptions struct{} -// ContainerGet reads NeoFS container by ID. -// -// Any errors (local or remote, including returned status codes) are returned as Go errors, -// see [apistatus] package for NeoFS-specific error types. -// -// Context is required and must not be nil. It is used for network communication. -func (c *Client) ContainerGet(ctx context.Context, id cid.ID, prm PrmContainerGet) (container.Container, error) { +// GetContainer reads NeoFS container by ID. Returns +// [apistatus.ErrContainerNotFound] if there is no such container. 
+func (c *Client) GetContainer(ctx context.Context, id cid.ID, _ GetContainerOptions) (container.Container, error) { + var res container.Container var err error - defer func() { - c.sendStatistic(stat.MethodContainerGet, err)() - }() - - var cidV2 refs.ContainerID - id.WriteToV2(&cidV2) - - // form request body - reqBody := new(v2container.GetRequestBody) - reqBody.SetContainerID(&cidV2) + if c.handleAPIOpResult != nil { + defer func(start time.Time) { + c.handleAPIOpResult(c.serverPubKey, c.endpoint, stat.MethodContainerGet, time.Since(start), err) + }(time.Now()) + } // form request - var req v2container.GetRequest - - req.SetBody(reqBody) - - // init call context - - var ( - cc contextCall - res container.Container - ) - - c.initCallContext(&cc) - cc.meta = prm.prmCommonMeta - cc.req = &req - cc.call = func() (responseV2, error) { - return rpcAPIGetContainer(&c.c, &req, client.WithContext(ctx)) + req := &apicontainer.GetRequest{ + Body: &apicontainer.GetRequest_Body{ContainerId: new(refs.ContainerID)}, + } + id.WriteToV2(req.Body.ContainerId) + // FIXME: balance requests need small fixed-size buffers for encoding, its makes + // no sense to mosh them with other buffers + buf := c.signBuffers.Get().(*[]byte) + defer c.signBuffers.Put(buf) + if req.VerifyHeader, err = neofscrypto.SignRequest(c.signer, req, req.Body, *buf); err != nil { + err = fmt.Errorf("%s: %w", errSignRequest, err) // for closure above + return res, err } - cc.result = func(r responseV2) { - resp := r.(*v2container.GetResponse) - - cnrV2 := resp.GetBody().GetContainer() - if cnrV2 == nil { - cc.err = errors.New("missing container in response") - return - } - cc.err = res.ReadFromV2(*cnrV2) - if cc.err != nil { - cc.err = fmt.Errorf("invalid container in response: %w", cc.err) + // send request + resp, err := c.transport.container.Get(ctx, req) + if err != nil { + err = fmt.Errorf("%s: %w", errTransport, err) // for closure above + return res, err + } + + // intercept response info + if 
c.interceptAPIRespInfo != nil { + if err = c.interceptAPIRespInfo(ResponseMetaInfo{ + key: resp.GetVerifyHeader().GetBodySignature().GetKey(), + epoch: resp.GetMetaHeader().GetEpoch(), + }); err != nil { + err = fmt.Errorf("%s: %w", errInterceptResponseInfo, err) // for closure above + return res, err } } - // process call - if !cc.processCall() { - err = cc.err - return container.Container{}, cc.err + // verify response integrity + if err = neofscrypto.VerifyResponse(resp, resp.Body); err != nil { + err = fmt.Errorf("%s: %w", errResponseSignature, err) // for closure above + return res, err + } + sts, err := apistatus.ErrorFromV2(resp.GetMetaHeader().GetStatus()) + if err != nil { + err = fmt.Errorf("%s: %w", errInvalidResponseStatus, err) // for closure above + return res, err + } + if sts != nil { + err = sts // for closure above + return res, err } + // decode response payload + if resp.Body == nil { + err = errors.New(errMissingResponseBody) // for closure above + return res, err + } + const fieldContainer = "container" + if resp.Body.Container == nil { + err = fmt.Errorf("%s (%s)", errMissingResponseBodyField, fieldContainer) // for closure above + return res, err + } else if err = res.ReadFromV2(resp.Body.Container); err != nil { + err = fmt.Errorf("%s (%s): %w", errInvalidResponseBodyField, fieldContainer, err) // for closure above + return res, err + } return res, nil } -// PrmContainerList groups optional parameters of ContainerList operation. -type PrmContainerList struct { - prmCommonMeta -} +// ListContainersOptions groups optional parameters of [Client.ListContainers]. +type ListContainersOptions struct{} -// ContainerList requests identifiers of the account-owned containers. -// -// Any errors (local or remote, including returned status codes) are returned as Go errors, -// see [apistatus] package for NeoFS-specific error types. -// -// Context is required and must not be nil. It is used for network communication. 
-func (c *Client) ContainerList(ctx context.Context, ownerID user.ID, prm PrmContainerList) ([]cid.ID, error) { +// ListContainers requests identifiers of all user-owned containers. +func (c *Client) ListContainers(ctx context.Context, usr user.ID, _ ListContainersOptions) ([]cid.ID, error) { var err error - defer func() { - c.sendStatistic(stat.MethodContainerList, err)() - }() - - // form request body - var ownerV2 refs.OwnerID - ownerID.WriteToV2(&ownerV2) - - reqBody := new(v2container.ListRequestBody) - reqBody.SetOwnerID(&ownerV2) + if c.handleAPIOpResult != nil { + defer func(start time.Time) { + c.handleAPIOpResult(c.serverPubKey, c.endpoint, stat.MethodContainerList, time.Since(start), err) + }(time.Now()) + } // form request - var req v2container.ListRequest - - req.SetBody(reqBody) - - // init call context - - var ( - cc contextCall - res []cid.ID - ) - - c.initCallContext(&cc) - cc.meta = prm.prmCommonMeta - cc.req = &req - cc.call = func() (responseV2, error) { - return rpcAPIListContainers(&c.c, &req, client.WithContext(ctx)) + req := &apicontainer.ListRequest{ + Body: &apicontainer.ListRequest_Body{OwnerId: new(refs.OwnerID)}, + } + usr.WriteToV2(req.Body.OwnerId) + // FIXME: balance requests need small fixed-size buffers for encoding, its makes + // no sense to mosh them with other buffers + buf := c.signBuffers.Get().(*[]byte) + defer c.signBuffers.Put(buf) + if req.VerifyHeader, err = neofscrypto.SignRequest(c.signer, req, req.Body, *buf); err != nil { + err = fmt.Errorf("%s: %w", errSignRequest, err) // for closure above + return nil, err } - cc.result = func(r responseV2) { - resp := r.(*v2container.ListResponse) - - res = make([]cid.ID, len(resp.GetBody().GetContainerIDs())) - for i, cidV2 := range resp.GetBody().GetContainerIDs() { - cc.err = res[i].ReadFromV2(cidV2) - if cc.err != nil { - cc.err = fmt.Errorf("invalid ID in the response: %w", cc.err) - return - } + // send request + resp, err := c.transport.container.List(ctx, req) + if err != 
nil { + err = fmt.Errorf("%s: %w", errTransport, err) // for closure above + return nil, err + } + + // intercept response info + if c.interceptAPIRespInfo != nil { + if err = c.interceptAPIRespInfo(ResponseMetaInfo{ + key: resp.GetVerifyHeader().GetBodySignature().GetKey(), + epoch: resp.GetMetaHeader().GetEpoch(), + }); err != nil { + err = fmt.Errorf("%s: %w", errInterceptResponseInfo, err) // for closure above + return nil, err } } - // process call - if !cc.processCall() { - err = cc.err - return nil, cc.err + // verify response integrity + if err = neofscrypto.VerifyResponse(resp, resp.Body); err != nil { + err = fmt.Errorf("%s: %w", errResponseSignature, err) // for closure above + return nil, err + } + sts, err := apistatus.ErrorFromV2(resp.GetMetaHeader().GetStatus()) + if err != nil { + err = fmt.Errorf("%s: %w", errInvalidResponseStatus, err) // for closure above + return nil, err + } + if sts != nil { + err = sts // for closure above + return nil, err + } + + // decode response payload + var res []cid.ID + if resp.Body != nil && len(resp.Body.ContainerIds) > 0 { + const fieldIDs = "ID list" + res = make([]cid.ID, len(resp.Body.ContainerIds)) + for i := range resp.Body.ContainerIds { + if resp.Body.ContainerIds == nil { + err = fmt.Errorf("%s (%s): nil element #%d", errInvalidResponseBodyField, fieldIDs, i) // for closure above + return nil, err + } else if err = res[i].ReadFromV2(resp.Body.ContainerIds[i]); err != nil { + err = fmt.Errorf("%s (%s): invalid element #%d: %w", errInvalidResponseBodyField, fieldIDs, i, err) // for closure above + return nil, err + } + } } - return res, nil } -// PrmContainerDelete groups optional parameters of ContainerDelete operation. -type PrmContainerDelete struct { - prmCommonMeta - - tokSet bool - tok session.Container +// DeleteContainerOptions groups optional parameters of +// [Client.DeleteContainer]. 
+type DeleteContainerOptions struct { + sessionSet bool + session session.Container } // WithinSession specifies session within which container should be removed. -// -// Creator of the session acquires the authorship of the request. -// This may affect the execution of an operation (e.g. access control). -// -// Must be signed. -func (x *PrmContainerDelete) WithinSession(tok session.Container) { - x.tok = tok - x.tokSet = true +// Session tokens grant user-to-user power of attorney: the subject can remove +// specified issuer's containers. Session op must be +// [session.VerbContainerDelete]. If used, [Client.DeleteContainer] default +// ownership behavior is replaced with: +// - session must be issued by the container owner; +// - session must target the subject authenticated by signer passed to +// [Client.DeleteContainer]. +func (x *DeleteContainerOptions) WithinSession(tok session.Container) { + x.session = tok + x.sessionSet = true } -// ContainerDelete sends request to remove the NeoFS container. -// -// Any errors (local or remote, including returned status codes) are returned as Go errors, -// see [apistatus] package for NeoFS-specific error types. -// -// Operation is asynchronous and no guaranteed even in the absence of errors. -// The required time is also not predictable. +// DeleteContainer sends request to remove the NeoFS container. If the request +// is accepted, DeleteContainer returns no error and the container is going to +// be removed asynchronously. The completion can be checked by polling the +// container presence until it won't be found (e.g. call [Client.GetContainer] +// until [apistatus.ErrContainerNotFound]). // -// Success can be verified by reading by identifier (see GetContainer). -// -// Context is required and must not be nil. It is used for network communication. -// -// Signer is required and must not be nil. The account corresponding to the specified Signer will be charged for the operation. 
-// Signer's scheme MUST be neofscrypto.ECDSA_DETERMINISTIC_SHA256. For example, you can use neofsecdsa.SignerRFC6979. -// -// Reflects all internal errors in second return value (transport problems, response processing, etc.). -// Return errors: -// - [ErrMissingSigner] -func (c *Client) ContainerDelete(ctx context.Context, id cid.ID, signer neofscrypto.Signer, prm PrmContainerDelete) error { - var err error - defer func() { - c.sendStatistic(stat.MethodContainerDelete, err)() - }() - +// Signer must authenticate container's owner. The signature scheme MUST be +// [neofscrypto.ECDSA_DETERMINISTIC_SHA256] (e.g. [neofsecdsa.SignerRFC6979]). +// Corresponding NeoFS account can be charged for the operation. +func (c *Client) DeleteContainer(ctx context.Context, id cid.ID, signer neofscrypto.Signer, opts DeleteContainerOptions) error { if signer == nil { - return ErrMissingSigner + return errMissingSigner + } else if scheme := signer.Scheme(); scheme != neofscrypto.ECDSA_DETERMINISTIC_SHA256 { + return fmt.Errorf("wrong signature scheme: %v instead of %v", scheme, neofscrypto.ECDSA_DETERMINISTIC_SHA256) } - // sign container ID - var cidV2 refs.ContainerID - id.WriteToV2(&cidV2) - - // container contract expects signature of container ID value - // don't get confused with stable marshaled protobuf container.ID structure - data := cidV2.GetValue() + var err error + if c.handleAPIOpResult != nil { + defer func(start time.Time) { + c.handleAPIOpResult(c.serverPubKey, c.endpoint, stat.MethodContainerDelete, time.Since(start), err) + }(time.Now()) + } - var sig neofscrypto.Signature - err = sig.Calculate(signer, data) + sig, err := signer.Sign(id[:]) if err != nil { - err = fmt.Errorf("calculate signature: %w", err) + err = fmt.Errorf("sign container ID: %w", err) // for closure above return err } - var sigv2 refs.Signature - - sig.WriteToV2(&sigv2) - - // form request body - reqBody := new(v2container.DeleteRequestBody) - reqBody.SetContainerID(&cidV2) - 
reqBody.SetSignature(&sigv2) - - // form meta header - var meta v2session.RequestMetaHeader - writeXHeadersToMeta(prm.prmCommonMeta.xHeaders, &meta) - - if prm.tokSet { - var tokv2 v2session.Token - prm.tok.WriteToV2(&tokv2) - - meta.SetSessionToken(&tokv2) - } - // form request - var req v2container.DeleteRequest - - req.SetBody(reqBody) - req.SetMetaHeader(&meta) - - // init call context - - var ( - cc contextCall - ) + req := &apicontainer.DeleteRequest{ + Body: &apicontainer.DeleteRequest_Body{ + ContainerId: new(refs.ContainerID), + Signature: &refs.SignatureRFC6979{Key: neofscrypto.PublicKeyBytes(signer.Public()), Sign: sig}, + }, + } + id.WriteToV2(req.Body.ContainerId) + if opts.sessionSet { + req.MetaHeader = &apisession.RequestMetaHeader{SessionToken: new(apisession.SessionToken)} + opts.session.WriteToV2(req.MetaHeader.SessionToken) + } + // FIXME: balance requests need small fixed-size buffers for encoding, its makes + // no sense to mosh them with other buffers + buf := c.signBuffers.Get().(*[]byte) + defer c.signBuffers.Put(buf) + if req.VerifyHeader, err = neofscrypto.SignRequest(signer, req, req.Body, *buf); err != nil { + err = fmt.Errorf("%s: %w", errSignRequest, err) // for closure above + return err + } - c.initCallContext(&cc) - cc.req = &req - cc.call = func() (responseV2, error) { - return rpcAPIDeleteContainer(&c.c, &req, client.WithContext(ctx)) + // send request + resp, err := c.transport.container.Delete(ctx, req) + if err != nil { + err = fmt.Errorf("%s: %w", errTransport, err) // for closure above + return err } - // process call - if !cc.processCall() { - err = cc.err - return cc.err + // intercept response info + if c.interceptAPIRespInfo != nil { + if err = c.interceptAPIRespInfo(ResponseMetaInfo{ + key: resp.GetVerifyHeader().GetBodySignature().GetKey(), + epoch: resp.GetMetaHeader().GetEpoch(), + }); err != nil { + err = fmt.Errorf("%s: %w", errInterceptResponseInfo, err) // for closure above + return err + } } - return nil + // 
verify response integrity + if err = neofscrypto.VerifyResponse(resp, resp.Body); err != nil { + err = fmt.Errorf("%s: %w", errResponseSignature, err) // for closure above + return err + } + sts, err := apistatus.ErrorFromV2(resp.GetMetaHeader().GetStatus()) + if err != nil { + err = fmt.Errorf("%s: %w", errInvalidResponseStatus, err) // for closure above + } else if sts != nil { + err = sts // for closure above + } + return err } -// PrmContainerEACL groups optional parameters of ContainerEACL operation. -type PrmContainerEACL struct { - prmCommonMeta -} +// GetEACLOptions groups optional parameters of [Client.GetEACL]. +type GetEACLOptions struct{} -// ContainerEACL reads eACL table of the NeoFS container. -// -// Any errors (local or remote, including returned status codes) are returned as Go errors, -// see [apistatus] package for NeoFS-specific error types. -// -// Context is required and must not be nil. It is used for network communication. -func (c *Client) ContainerEACL(ctx context.Context, id cid.ID, prm PrmContainerEACL) (eacl.Table, error) { +// GetEACL reads eACL table of the NeoFS container. Returns +// [apistatus.ErrEACLNotFound] if eACL is unset for this container. Returns +// [apistatus.ErrContainerNotFound] if there is no such container. 
+func (c *Client) GetEACL(ctx context.Context, id cid.ID, _ GetEACLOptions) (eacl.Table, error) { + var res eacl.Table var err error - defer func() { - c.sendStatistic(stat.MethodContainerEACL, err)() - }() - - var cidV2 refs.ContainerID - id.WriteToV2(&cidV2) - - // form request body - reqBody := new(v2container.GetExtendedACLRequestBody) - reqBody.SetContainerID(&cidV2) + if c.handleAPIOpResult != nil { + defer func(start time.Time) { + c.handleAPIOpResult(c.serverPubKey, c.endpoint, stat.MethodContainerEACL, time.Since(start), err) + }(time.Now()) + } // form request - var req v2container.GetExtendedACLRequest - - req.SetBody(reqBody) - - // init call context - - var ( - cc contextCall - res eacl.Table - ) - - c.initCallContext(&cc) - cc.meta = prm.prmCommonMeta - cc.req = &req - cc.call = func() (responseV2, error) { - return rpcAPIGetEACL(&c.c, &req, client.WithContext(ctx)) + req := &apicontainer.GetExtendedACLRequest{ + Body: &apicontainer.GetExtendedACLRequest_Body{ContainerId: new(refs.ContainerID)}, + } + id.WriteToV2(req.Body.ContainerId) + // FIXME: balance requests need small fixed-size buffers for encoding, its makes + // no sense to mosh them with other buffers + buf := c.signBuffers.Get().(*[]byte) + defer c.signBuffers.Put(buf) + if req.VerifyHeader, err = neofscrypto.SignRequest(c.signer, req, req.Body, *buf); err != nil { + err = fmt.Errorf("%s: %w", errSignRequest, err) // for closure above + return res, err } - cc.result = func(r responseV2) { - resp := r.(*v2container.GetExtendedACLResponse) - eACL := resp.GetBody().GetEACL() - if eACL == nil { - cc.err = newErrMissingResponseField("eACL") - return + // send request + resp, err := c.transport.container.GetExtendedACL(ctx, req) + if err != nil { + err = fmt.Errorf("%s: %w", errTransport, err) // for closure above + return res, err + } + + // intercept response info + if c.interceptAPIRespInfo != nil { + if err = c.interceptAPIRespInfo(ResponseMetaInfo{ + key: 
resp.GetVerifyHeader().GetBodySignature().GetKey(), + epoch: resp.GetMetaHeader().GetEpoch(), + }); err != nil { + err = fmt.Errorf("%s: %w", errInterceptResponseInfo, err) // for closure above + return res, err } - - res = *eacl.NewTableFromV2(eACL) } - // process call - if !cc.processCall() { - err = cc.err - return eacl.Table{}, cc.err + // verify response integrity + if err = neofscrypto.VerifyResponse(resp, resp.Body); err != nil { + err = fmt.Errorf("%s: %w", errResponseSignature, err) // for closure above + return res, err + } + sts, err := apistatus.ErrorFromV2(resp.GetMetaHeader().GetStatus()) + if err != nil { + err = fmt.Errorf("%s: %w", errInvalidResponseStatus, err) // for closure above + return res, err + } + if sts != nil { + err = sts + return res, err // for closure above } + // decode response payload + if resp.Body == nil { + err = errors.New(errMissingResponseBody) // for closure above + return res, err + } + const fieldEACL = "eACL" + if resp.Body.Eacl == nil { + err = fmt.Errorf("%s (%s)", errMissingResponseBodyField, fieldEACL) // for closure above + return res, err + } else if err = res.ReadFromV2(resp.Body.Eacl); err != nil { + err = fmt.Errorf("%s (%s): %w", errInvalidResponseBodyField, fieldEACL, err) // for closure above + return res, err + } return res, nil } -// PrmContainerSetEACL groups optional parameters of ContainerSetEACL operation. -type PrmContainerSetEACL struct { - prmCommonMeta - +// SetEACLOptions groups optional parameters of [Client.SetEACLOptions]. +type SetEACLOptions struct { sessionSet bool session session.Container } // WithinSession specifies session within which extended ACL of the container -// should be saved. -// -// Creator of the session acquires the authorship of the request. This affects -// the execution of an operation (e.g. access control). 
-// -// Session is optional, if set the following requirements apply: -// - if particular container is specified (ApplyOnlyTo), it MUST equal the container -// for which extended ACL is going to be set -// - session operation MUST be session.VerbContainerSetEACL (ForVerb) -// - token MUST be signed using private signer of the owner of the container to be saved -func (x *PrmContainerSetEACL) WithinSession(s session.Container) { +// should be saved. Session tokens grant user-to-user power of attorney: the +// subject can modify eACL rules of specified issuer's containers. Session op +// must be [session.VerbContainerSetEACL]. If used, [Client.SetEACL] default +// ownership behavior is replaced with: +// - session must be issued by the container owner; +// - session must target the subject authenticated by signer passed to +// [Client.SetEACL]. +func (x *SetEACLOptions) WithinSession(s session.Container) { x.session = s x.sessionSet = true } -// ContainerSetEACL sends request to update eACL table of the NeoFS container. -// -// Any errors (local or remote, including returned status codes) are returned as Go errors, -// see [apistatus] package for NeoFS-specific error types. -// -// Operation is asynchronous and no guaranteed even in the absence of errors. -// The required time is also not predictable. -// -// Success can be verified by reading by identifier (see EACL). -// -// Signer is required and must not be nil. The account corresponding to the specified Signer will be charged for the operation. -// Signer's scheme MUST be neofscrypto.ECDSA_DETERMINISTIC_SHA256. For example, you can use neofsecdsa.SignerRFC6979. -// -// Return errors: -// - [ErrMissingEACLContainer] -// - [ErrMissingSigner] +// SetEACL sends request to update eACL table of the NeoFS container. If the +// request is accepted, SetEACL returns no error and the eACL is going to be set +// asynchronously. The completion can be checked by eACL polling +// ([Client.GetEACL]) and binary comparison. 
SetEACL returns +// [apistatus.ErrContainerNotFound] if container is missing. // -// Context is required and must not be nil. It is used for network communication. -func (c *Client) ContainerSetEACL(ctx context.Context, table eacl.Table, signer user.Signer, prm PrmContainerSetEACL) error { - var err error - defer func() { - c.sendStatistic(stat.MethodContainerSetEACL, err)() - }() - +// Signer must authenticate container's owner. The signature scheme MUST be +// [neofscrypto.ECDSA_DETERMINISTIC_SHA256] (e.g. [neofsecdsa.SignerRFC6979]). +// Owner's NeoFS account can be charged for the operation. +func (c *Client) SetEACL(ctx context.Context, eACL eacl.Table, signer neofscrypto.Signer, opts SetEACLOptions) error { if signer == nil { - return ErrMissingSigner + return errMissingSigner + } else if scheme := signer.Scheme(); scheme != neofscrypto.ECDSA_DETERMINISTIC_SHA256 { + return fmt.Errorf("wrong signature scheme: %v instead of %v", scheme, neofscrypto.ECDSA_DETERMINISTIC_SHA256) + } else if eACL.LimitedContainer().IsZero() { + return errors.New("missing container in the eACL") } - _, isCIDSet := table.CID() - if !isCIDSet { - err = ErrMissingEACLContainer - return err + var err error + if c.handleAPIOpResult != nil { + defer func(start time.Time) { + c.handleAPIOpResult(c.serverPubKey, c.endpoint, stat.MethodContainerSetEACL, time.Since(start), err) + }(time.Now()) } - // sign the eACL table - eaclV2 := table.ToV2() - - var sig neofscrypto.Signature - err = sig.CalculateMarshalled(signer, eaclV2, nil) + sig, err := signer.Sign(eACL.Marshal()) if err != nil { - err = fmt.Errorf("calculate signature: %w", err) + err = fmt.Errorf("sign eACL: %w", err) // for closure above return err } - var sigv2 refs.Signature - - sig.WriteToV2(&sigv2) - - // form request body - reqBody := new(v2container.SetExtendedACLRequestBody) - reqBody.SetEACL(eaclV2) - reqBody.SetSignature(&sigv2) - - // form meta header - var meta v2session.RequestMetaHeader - 
writeXHeadersToMeta(prm.prmCommonMeta.xHeaders, &meta) - - if prm.sessionSet { - var tokv2 v2session.Token - prm.session.WriteToV2(&tokv2) - - meta.SetSessionToken(&tokv2) - } - // form request - var req v2container.SetExtendedACLRequest - - req.SetBody(reqBody) - req.SetMetaHeader(&meta) - - // init call context - - var ( - cc contextCall - ) + req := &apicontainer.SetExtendedACLRequest{ + Body: &apicontainer.SetExtendedACLRequest_Body{ + Eacl: new(apiacl.EACLTable), + Signature: &refs.SignatureRFC6979{Key: neofscrypto.PublicKeyBytes(signer.Public()), Sign: sig}, + }, + } + eACL.WriteToV2(req.Body.Eacl) + if opts.sessionSet { + req.MetaHeader = &apisession.RequestMetaHeader{SessionToken: new(apisession.SessionToken)} + opts.session.WriteToV2(req.MetaHeader.SessionToken) + } + // FIXME: balance requests need small fixed-size buffers for encoding, its makes + // no sense to mosh them with other buffers + buf := c.signBuffers.Get().(*[]byte) + defer c.signBuffers.Put(buf) + if req.VerifyHeader, err = neofscrypto.SignRequest(signer, req, req.Body, *buf); err != nil { + err = fmt.Errorf("%s: %w", errSignRequest, err) // for closure above + return err + } - c.initCallContext(&cc) - cc.req = &req - cc.call = func() (responseV2, error) { - return rpcAPISetEACL(&c.c, &req, client.WithContext(ctx)) + // send request + resp, err := c.transport.container.SetExtendedACL(ctx, req) + if err != nil { + err = fmt.Errorf("%s: %w", errTransport, err) // for closure above + return err } - // process call - if !cc.processCall() { - err = cc.err - return cc.err + // intercept response info + if c.interceptAPIRespInfo != nil { + if err = c.interceptAPIRespInfo(ResponseMetaInfo{ + key: resp.GetVerifyHeader().GetBodySignature().GetKey(), + epoch: resp.GetMetaHeader().GetEpoch(), + }); err != nil { + err = fmt.Errorf("%s: %w", errInterceptResponseInfo, err) // for closure above + return err + } } - return nil + // verify response integrity + if err = neofscrypto.VerifyResponse(resp, 
resp.Body); err != nil { + err = fmt.Errorf("%s: %w", errResponseSignature, err) // for closure above + return err + } + sts, err := apistatus.ErrorFromV2(resp.GetMetaHeader().GetStatus()) + if err != nil { + err = fmt.Errorf("%s: %w", errInvalidResponseStatus, err) // for closure above + } else { + err = sts // for closure above + } + return err } -// PrmAnnounceSpace groups optional parameters of ContainerAnnounceUsedSpace operation. -type PrmAnnounceSpace struct { - prmCommonMeta -} +// SendContainerSizeEstimationsOptions groups optional parameters of +// [Client.SendContainerSizeEstimations]. +type SendContainerSizeEstimationsOptions struct{} -// ContainerAnnounceUsedSpace sends request to announce volume of the space used for the container objects. -// -// Any errors (local or remote, including returned status codes) are returned as Go errors, -// see [apistatus] package for NeoFS-specific error types. -// -// Operation is asynchronous and no guaranteed even in the absence of errors. -// The required time is also not predictable. -// -// At this moment success can not be checked. -// -// Context is required and must not be nil. It is used for network communication. +// SendContainerSizeEstimations sends container size estimations to the remote +// node. The estimation set must not be empty. // -// Announcements parameter MUST NOT be empty slice. -// -// Return errors: -// - [ErrMissingAnnouncements] -func (c *Client) ContainerAnnounceUsedSpace(ctx context.Context, announcements []container.SizeEstimation, prm PrmAnnounceSpace) error { - var err error - defer func() { - c.sendStatistic(stat.MethodContainerAnnounceUsedSpace, err)() - }() - - if len(announcements) == 0 { - err = ErrMissingAnnouncements - return err +// SendContainerSizeEstimations is used for system needs and is not intended to +// be called by regular users. 
+func (c *Client) SendContainerSizeEstimations(ctx context.Context, es []container.SizeEstimation, _ SendContainerSizeEstimationsOptions) error { + if len(es) == 0 { + return errors.New("missing estimations") } - // convert list of SDK announcement structures into NeoFS-API v2 list - v2announce := make([]v2container.UsedSpaceAnnouncement, len(announcements)) - for i := range announcements { - announcements[i].WriteToV2(&v2announce[i]) + var err error + if c.handleAPIOpResult != nil { + defer func(start time.Time) { + c.handleAPIOpResult(c.serverPubKey, c.endpoint, stat.MethodContainerAnnounceUsedSpace, time.Since(start), err) + }(time.Now()) } - // prepare body of the NeoFS-API v2 request and request itself - reqBody := new(v2container.AnnounceUsedSpaceRequestBody) - reqBody.SetAnnouncements(v2announce) - // form request - var req v2container.AnnounceUsedSpaceRequest - - req.SetBody(reqBody) - - // init call context - - var ( - cc contextCall - ) - - c.initCallContext(&cc) - cc.meta = prm.prmCommonMeta - cc.req = &req - cc.call = func() (responseV2, error) { - return rpcAPIAnnounceUsedSpace(&c.c, &req, client.WithContext(ctx)) + req := &apicontainer.AnnounceUsedSpaceRequest{ + Body: &apicontainer.AnnounceUsedSpaceRequest_Body{ + Announcements: make([]*apicontainer.AnnounceUsedSpaceRequest_Body_Announcement, len(es)), + }, + } + for i := range es { + req.Body.Announcements[i] = new(apicontainer.AnnounceUsedSpaceRequest_Body_Announcement) + es[i].WriteToV2(req.Body.Announcements[i]) + } + // FIXME: balance requests need small fixed-size buffers for encoding, its makes + // no sense to mosh them with other buffers + buf := c.signBuffers.Get().(*[]byte) + defer c.signBuffers.Put(buf) + if req.VerifyHeader, err = neofscrypto.SignRequest(c.signer, req, req.Body, *buf); err != nil { + err = fmt.Errorf("%s: %w", errSignRequest, err) // for closure above + return err } - // process call - if !cc.processCall() { - err = cc.err - return cc.err + // send request + resp, err := 
c.transport.container.AnnounceUsedSpace(ctx, req) + if err != nil { + err = fmt.Errorf("%s: %w", errTransport, err) // for closure above + return err } - return nil -} - -// SyncContainerWithNetwork requests network configuration using passed [NetworkInfoExecutor] -// and applies/rewrites it to the container. -// -// Returns any network/parsing config errors. -// -// See also [client.Client.NetworkInfo], [container.Container.ApplyNetworkConfig]. -func SyncContainerWithNetwork(ctx context.Context, cnr *container.Container, c NetworkInfoExecutor) error { - if cnr == nil { - return errors.New("empty container") + // intercept response info + if c.interceptAPIRespInfo != nil { + if err = c.interceptAPIRespInfo(ResponseMetaInfo{ + key: resp.GetVerifyHeader().GetBodySignature().GetKey(), + epoch: resp.GetMetaHeader().GetEpoch(), + }); err != nil { + err = fmt.Errorf("%s: %w", errInterceptResponseInfo, err) // for closure above + return err + } } - res, err := c.NetworkInfo(ctx, PrmNetworkInfo{}) + // verify response integrity + if err = neofscrypto.VerifyResponse(resp, resp.Body); err != nil { + err = fmt.Errorf("%s: %w", errResponseSignature, err) // for closure above + return err + } + sts, err := apistatus.ErrorFromV2(resp.GetMetaHeader().GetStatus()) if err != nil { - return fmt.Errorf("network info call: %w", err) + err = fmt.Errorf("%s: %w", errInvalidResponseStatus, err) // for closure above } - - cnr.ApplyNetworkConfig(res) - - return nil + if sts != nil { + err = sts // for closure above + } + return err } diff --git a/client/container_statistic_test.go b/client/container_statistic_test.go index 5c4a3a728..ae789a26b 100644 --- a/client/container_statistic_test.go +++ b/client/container_statistic_test.go @@ -1,914 +1,877 @@ package client -import ( - "context" - "crypto/rand" - "crypto/sha256" - "fmt" - mathRand "math/rand" - "strconv" - "testing" - "time" - - "github.com/google/uuid" - "github.com/nspcc-dev/neofs-api-go/v2/accounting" - v2acl 
"github.com/nspcc-dev/neofs-api-go/v2/acl" - v2container "github.com/nspcc-dev/neofs-api-go/v2/container" - netmapv2 "github.com/nspcc-dev/neofs-api-go/v2/netmap" - v2object "github.com/nspcc-dev/neofs-api-go/v2/object" - "github.com/nspcc-dev/neofs-api-go/v2/refs" - "github.com/nspcc-dev/neofs-api-go/v2/reputation" - rpcapi "github.com/nspcc-dev/neofs-api-go/v2/rpc" - "github.com/nspcc-dev/neofs-api-go/v2/rpc/client" - "github.com/nspcc-dev/neofs-api-go/v2/session" - "github.com/nspcc-dev/neofs-sdk-go/container" - "github.com/nspcc-dev/neofs-sdk-go/container/acl" - cid "github.com/nspcc-dev/neofs-sdk-go/container/id" - neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" - "github.com/nspcc-dev/neofs-sdk-go/crypto/test" - "github.com/nspcc-dev/neofs-sdk-go/eacl" - "github.com/nspcc-dev/neofs-sdk-go/netmap" - "github.com/nspcc-dev/neofs-sdk-go/object" - oid "github.com/nspcc-dev/neofs-sdk-go/object/id" - reputation2 "github.com/nspcc-dev/neofs-sdk-go/reputation" - session2 "github.com/nspcc-dev/neofs-sdk-go/session" - "github.com/nspcc-dev/neofs-sdk-go/stat" - "github.com/nspcc-dev/neofs-sdk-go/user" - "github.com/stretchr/testify/require" -) - -type ( - methodStatistic struct { - requests int - errors int - duration time.Duration - } - - testStatCollector struct { - methods map[stat.Method]*methodStatistic - } -) - -func newCollector() *testStatCollector { - c := testStatCollector{ - methods: make(map[stat.Method]*methodStatistic), - } - - for i := stat.MethodBalanceGet; i < stat.MethodLast; i++ { - c.methods[i] = &methodStatistic{} - } - - return &c -} - -func (c *testStatCollector) Collect(_ []byte, _ string, method stat.Method, duration time.Duration, err error) { - data, ok := c.methods[method] - if ok { - data.duration += duration - if duration > 0 { - data.requests++ - } - - if err != nil { - data.errors++ - } - } -} - -func randBytes(l int) []byte { - r := make([]byte, l) - _, _ = rand.Read(r) - - return r -} - -func randRefsContainerID() 
*refs.ContainerID { - var id refs.ContainerID - id.SetValue(randBytes(sha256.Size)) - return &id -} - -func randContainerID() *cid.ID { - var refID refs.ContainerID - refID.SetValue(randBytes(sha256.Size)) - - var id cid.ID - _ = id.ReadFromV2(refID) - - return &id -} - -func randAccount(signer user.Signer) *user.ID { - u := signer.UserID() - - return &u -} - -func randOwner(signer user.Signer) *refs.OwnerID { - acc := randAccount(signer) - - var u refs.OwnerID - acc.WriteToV2(&u) - - return &u -} - -func prepareContainer(accountID user.ID) container.Container { - cont := container.Container{} - cont.Init() - cont.SetOwner(accountID) - cont.SetBasicACL(acl.PublicRW) - - cont.SetName(strconv.FormatInt(time.Now().UnixNano(), 16)) - cont.SetCreationTime(time.Now().UTC()) - - var pp netmap.PlacementPolicy - var rd netmap.ReplicaDescriptor - rd.SetNumberOfObjects(1) - - pp.SetContainerBackupFactor(1) - pp.SetReplicas([]netmap.ReplicaDescriptor{rd}) - cont.SetPlacementPolicy(pp) - - return cont -} - -func testEaclTable(containerID cid.ID) eacl.Table { - var table eacl.Table - table.SetCID(containerID) - - r := eacl.NewRecord() - r.SetOperation(eacl.OperationPut) - r.SetAction(eacl.ActionAllow) - - var target eacl.Target - target.SetRole(eacl.RoleOthers) - r.SetTargets(target) - table.AddRecord(r) - - return table -} - -func TestClientStatistic_AccountBalance(t *testing.T) { - signer := test.RandomSignerRFC6979(t) - ctx := context.Background() - c := newClient(t, nil) - - rpcAPIBalance = func(_ *client.Client, _ *accounting.BalanceRequest, _ ...client.CallOption) (*accounting.BalanceResponse, error) { - var resp accounting.BalanceResponse - var meta session.ResponseMetaHeader - var balance accounting.Decimal - var body accounting.BalanceResponseBody - - body.SetBalance(&balance) - - resp.SetBody(&body) - resp.SetMetaHeader(&meta) - - err := signServiceMessage(signer, &resp, nil) - if err != nil { - panic(fmt.Sprintf("sign response: %v", err)) - } - - return &resp, nil - } 
- - collector := newCollector() - c.prm.statisticCallback = collector.Collect - - var prm PrmBalanceGet - prm.SetAccount(*randAccount(signer)) - _, err := c.BalanceGet(ctx, prm) - require.NoError(t, err) - - require.Equal(t, 1, collector.methods[stat.MethodBalanceGet].requests) -} - -func TestClientStatistic_ContainerPut(t *testing.T) { - signer := test.RandomSignerRFC6979(t) - ctx := context.Background() - c := newClient(t, nil) - - rpcAPIPutContainer = func(_ *client.Client, _ *v2container.PutRequest, _ ...client.CallOption) (*v2container.PutResponse, error) { - var resp v2container.PutResponse - var meta session.ResponseMetaHeader - var body v2container.PutResponseBody - - body.SetContainerID(randRefsContainerID()) - - resp.SetBody(&body) - resp.SetMetaHeader(&meta) - - err := signServiceMessage(signer, &resp, nil) - if err != nil { - panic(fmt.Sprintf("sign response: %v", err)) - } - - return &resp, nil - } - - cont := prepareContainer(*randAccount(signer)) - - collector := newCollector() - c.prm.statisticCallback = collector.Collect - - var prm PrmContainerPut - _, err := c.ContainerPut(ctx, cont, signer, prm) - require.NoError(t, err) - - require.Equal(t, 1, collector.methods[stat.MethodContainerPut].requests) -} - -func TestClientStatistic_ContainerGet(t *testing.T) { - signer := test.RandomSignerRFC6979(t) - ctx := context.Background() - c := newClient(t, nil) - - rpcAPIGetContainer = func(_ *client.Client, _ *v2container.GetRequest, _ ...client.CallOption) (*v2container.GetResponse, error) { - var cont v2container.Container - var ver refs.Version - var placementPolicyV2 netmapv2.PlacementPolicy - var replicas []netmapv2.Replica - var resp v2container.GetResponse - var meta session.ResponseMetaHeader - - cont.SetOwnerID(randOwner(signer)) - cont.SetVersion(&ver) - - nonce, err := uuid.New().MarshalBinary() - require.NoError(t, err) - cont.SetNonce(nonce) - - replica := netmapv2.Replica{} - replica.SetCount(1) - replicas = append(replicas, replica) - 
placementPolicyV2.SetReplicas(replicas) - cont.SetPlacementPolicy(&placementPolicyV2) - - body := v2container.GetResponseBody{} - body.SetContainer(&cont) - - resp.SetBody(&body) - resp.SetMetaHeader(&meta) - - if err = signServiceMessage(signer, &resp, nil); err != nil { - panic(fmt.Sprintf("sign response: %v", err)) - } - - return &resp, nil - } - - collector := newCollector() - c.prm.statisticCallback = collector.Collect - - var prm PrmContainerGet - _, err := c.ContainerGet(ctx, cid.ID{}, prm) - require.NoError(t, err) - - require.Equal(t, 1, collector.methods[stat.MethodContainerGet].requests) -} - -func TestClientStatistic_ContainerList(t *testing.T) { - signer := test.RandomSignerRFC6979(t) - ctx := context.Background() - c := newClient(t, nil) - - rpcAPIListContainers = func(_ *client.Client, _ *v2container.ListRequest, _ ...client.CallOption) (*v2container.ListResponse, error) { - var resp v2container.ListResponse - var meta session.ResponseMetaHeader - var body v2container.ListResponseBody - - resp.SetBody(&body) - resp.SetMetaHeader(&meta) - - if err := signServiceMessage(signer, &resp, nil); err != nil { - panic(fmt.Sprintf("sign response: %v", err)) - } - - return &resp, nil - } - - collector := newCollector() - c.prm.statisticCallback = collector.Collect - - var prm PrmContainerList - _, err := c.ContainerList(ctx, *randAccount(signer), prm) - require.NoError(t, err) - - require.Equal(t, 1, collector.methods[stat.MethodContainerList].requests) -} - -func TestClientStatistic_ContainerDelete(t *testing.T) { - signer := test.RandomSignerRFC6979(t) - ctx := context.Background() - c := newClient(t, nil) - - rpcAPIDeleteContainer = func(_ *client.Client, _ *v2container.DeleteRequest, _ ...client.CallOption) (*v2container.PutResponse, error) { - var resp v2container.PutResponse - var meta session.ResponseMetaHeader - var body v2container.PutResponseBody - - resp.SetBody(&body) - resp.SetMetaHeader(&meta) - - if err := signServiceMessage(signer, &resp, nil); 
err != nil { - panic(fmt.Sprintf("sign response: %v", err)) - } - - return &resp, nil - } - - collector := newCollector() - c.prm.statisticCallback = collector.Collect - - var prm PrmContainerDelete - err := c.ContainerDelete(ctx, cid.ID{}, signer, prm) - require.NoError(t, err) - - require.Equal(t, 1, collector.methods[stat.MethodContainerDelete].requests) -} - -func TestClientStatistic_ContainerEacl(t *testing.T) { - signer := test.RandomSignerRFC6979(t) - ctx := context.Background() - c := newClient(t, nil) - - rpcAPIGetEACL = func(_ *client.Client, _ *v2container.GetExtendedACLRequest, _ ...client.CallOption) (*v2container.GetExtendedACLResponse, error) { - var resp v2container.GetExtendedACLResponse - var meta session.ResponseMetaHeader - var aclTable v2acl.Table - var body v2container.GetExtendedACLResponseBody - - body.SetEACL(&aclTable) - - resp.SetBody(&body) - resp.SetMetaHeader(&meta) - - if err := signServiceMessage(signer, &resp, nil); err != nil { - panic(fmt.Sprintf("sign response: %v", err)) - } - - return &resp, nil - } - - collector := newCollector() - c.prm.statisticCallback = collector.Collect - - var prm PrmContainerEACL - _, err := c.ContainerEACL(ctx, cid.ID{}, prm) - require.NoError(t, err) - - require.Equal(t, 1, collector.methods[stat.MethodContainerEACL].requests) -} - -func TestClientStatistic_ContainerSetEacl(t *testing.T) { - signer := test.RandomSignerRFC6979(t) - ctx := context.Background() - c := newClient(t, nil) - - rpcAPISetEACL = func(_ *client.Client, _ *v2container.SetExtendedACLRequest, _ ...client.CallOption) (*v2container.PutResponse, error) { - var resp v2container.PutResponse - var meta session.ResponseMetaHeader - var body v2container.PutResponseBody - - resp.SetBody(&body) - resp.SetMetaHeader(&meta) - - if err := signServiceMessage(signer, &resp, nil); err != nil { - panic(fmt.Sprintf("sign response: %v", err)) - } - - return &resp, nil - } - - collector := newCollector() - c.prm.statisticCallback = collector.Collect - 
- var prm PrmContainerSetEACL - table := testEaclTable(cid.ID{}) - err := c.ContainerSetEACL(ctx, table, signer, prm) - require.NoError(t, err) - - require.Equal(t, 1, collector.methods[stat.MethodContainerSetEACL].requests) -} - -func TestClientStatistic_ContainerAnnounceUsedSpace(t *testing.T) { - signer := test.RandomSignerRFC6979(t) - ctx := context.Background() - c := newClient(t, nil) - - rpcAPIAnnounceUsedSpace = func(_ *client.Client, _ *v2container.AnnounceUsedSpaceRequest, _ ...client.CallOption) (*v2container.PutResponse, error) { - var resp v2container.PutResponse - var meta session.ResponseMetaHeader - var body v2container.PutResponseBody - - resp.SetBody(&body) - resp.SetMetaHeader(&meta) - - if err := signServiceMessage(signer, &resp, nil); err != nil { - panic(fmt.Sprintf("sign response: %v", err)) - } - - return &resp, nil - } - - collector := newCollector() - c.prm.statisticCallback = collector.Collect - - estimation := container.SizeEstimation{} - estimation.SetContainer(*randContainerID()) - estimation.SetValue(mathRand.Uint64()) - estimation.SetEpoch(mathRand.Uint64()) - - var prm PrmAnnounceSpace - err := c.ContainerAnnounceUsedSpace(ctx, []container.SizeEstimation{estimation}, prm) - require.NoError(t, err) - - require.Equal(t, 1, collector.methods[stat.MethodContainerAnnounceUsedSpace].requests) -} - -func TestClientStatistic_ContainerSyncContainerWithNetwork(t *testing.T) { - signer := test.RandomSignerRFC6979(t) - ctx := context.Background() - c := newClient(t, nil) - - rpcAPINetworkInfo = func(_ *client.Client, _ *netmapv2.NetworkInfoRequest, _ ...client.CallOption) (*netmapv2.NetworkInfoResponse, error) { - var resp netmapv2.NetworkInfoResponse - var meta session.ResponseMetaHeader - var netInfo netmapv2.NetworkInfo - var netConfig netmapv2.NetworkConfig - var p1 netmapv2.NetworkParameter - - p1.SetKey(randBytes(10)) - p1.SetValue(randBytes(10)) - - netConfig.SetParameters(p1) - netInfo.SetNetworkConfig(&netConfig) - - body := 
netmapv2.NetworkInfoResponseBody{} - body.SetNetworkInfo(&netInfo) - - resp.SetBody(&body) - resp.SetMetaHeader(&meta) - - if err := signServiceMessage(signer, &resp, nil); err != nil { - panic(fmt.Sprintf("sign response: %v", err)) - } - - return &resp, nil - } - - collector := newCollector() - c.prm.statisticCallback = collector.Collect - - cont := prepareContainer(*randAccount(signer)) - - err := SyncContainerWithNetwork(ctx, &cont, c) - require.NoError(t, err) - - require.Equal(t, 1, collector.methods[stat.MethodNetworkInfo].requests) -} - -func TestClientStatistic_ContainerEndpointInfo(t *testing.T) { - signer := test.RandomSignerRFC6979(t) - ctx := context.Background() - c := newClient(t, nil) - - rpcAPILocalNodeInfo = func(_ *client.Client, _ *netmapv2.LocalNodeInfoRequest, _ ...client.CallOption) (*netmapv2.LocalNodeInfoResponse, error) { - var resp netmapv2.LocalNodeInfoResponse - var meta session.ResponseMetaHeader - var ver refs.Version - var nodeInfo netmapv2.NodeInfo - - nodeInfo.SetPublicKey(neofscrypto.PublicKeyBytes(signer.Public())) - nodeInfo.SetAddresses("https://some-endpont.com") - - body := netmapv2.LocalNodeInfoResponseBody{} - body.SetVersion(&ver) - body.SetNodeInfo(&nodeInfo) - - resp.SetBody(&body) - resp.SetMetaHeader(&meta) - - if err := signServiceMessage(signer, &resp, nil); err != nil { - panic(fmt.Sprintf("sign response: %v", err)) - } - - return &resp, nil - } - - collector := newCollector() - c.prm.statisticCallback = collector.Collect - - _, err := c.EndpointInfo(ctx, PrmEndpointInfo{}) - require.NoError(t, err) - - require.Equal(t, 1, collector.methods[stat.MethodEndpointInfo].requests) -} - -func TestClientStatistic_ContainerNetMapSnapshot(t *testing.T) { - signer := test.RandomSignerRFC6979(t) - ctx := context.Background() - c := newClient(t, nil) - - rpcAPINetMapSnapshot = func(_ *client.Client, _ *netmapv2.SnapshotRequest, _ ...client.CallOption) (*netmapv2.SnapshotResponse, error) { - var resp netmapv2.SnapshotResponse - 
var meta session.ResponseMetaHeader - var netMap netmapv2.NetMap - - body := netmapv2.SnapshotResponseBody{} - body.SetNetMap(&netMap) - - resp.SetBody(&body) - resp.SetMetaHeader(&meta) - - if err := signServiceMessage(signer, &resp, nil); err != nil { - panic(fmt.Sprintf("sign response: %v", err)) - } - - return &resp, nil - } - - collector := newCollector() - c.prm.statisticCallback = collector.Collect - c.setNeoFSAPIServer((*coreServer)(&c.c)) - - _, err := c.NetMapSnapshot(ctx, PrmNetMapSnapshot{}) - require.NoError(t, err) - - require.Equal(t, 1, collector.methods[stat.MethodNetMapSnapshot].requests) -} - -func TestClientStatistic_CreateSession(t *testing.T) { - signer := test.RandomSignerRFC6979(t) - ctx := context.Background() - c := newClient(t, nil) - - rpcAPICreateSession = func(_ *client.Client, _ *session.CreateRequest, _ ...client.CallOption) (*session.CreateResponse, error) { - var resp session.CreateResponse - var meta session.ResponseMetaHeader - - body := session.CreateResponseBody{} - body.SetID(randBytes(10)) - - body.SetSessionKey(neofscrypto.PublicKeyBytes(signer.Public())) - - resp.SetBody(&body) - resp.SetMetaHeader(&meta) - - if err := signServiceMessage(signer, &resp, nil); err != nil { - panic(fmt.Sprintf("sign response: %v", err)) - } - - return &resp, nil - } - - collector := newCollector() - c.prm.statisticCallback = collector.Collect - c.setNeoFSAPIServer((*coreServer)(&c.c)) - - var prm PrmSessionCreate - - _, err := c.SessionCreate(ctx, signer, prm) - require.NoError(t, err) - - require.Equal(t, 1, collector.methods[stat.MethodSessionCreate].requests) -} - -func TestClientStatistic_ObjectPut(t *testing.T) { - t.Skip("need changes to api-go, to set `wc client.MessageWriterCloser` in rpcapi.PutRequestWriter") - - signer := test.RandomSignerRFC6979(t) - ctx := context.Background() - c := newClient(t, nil) - - rpcAPIPutObject = func(_ *client.Client, _ *v2object.PutResponse, _ ...client.CallOption) (objectWriter, error) { - var resp 
rpcapi.PutRequestWriter - - return &resp, nil - } - - containerID := *randContainerID() - account := randAccount(signer) - - collector := newCollector() - c.prm.statisticCallback = collector.Collect - c.setNeoFSAPIServer((*coreServer)(&c.c)) - - var tokenSession session2.Object - tokenSession.SetID(uuid.New()) - tokenSession.SetExp(1) - tokenSession.BindContainer(containerID) - tokenSession.ForVerb(session2.VerbObjectPut) - tokenSession.SetAuthKey(signer.Public()) - tokenSession.SetIssuer(*account) - - err := tokenSession.Sign(signer) - require.NoError(t, err) - - var prm PrmObjectPutInit - prm.WithinSession(tokenSession) - - var hdr object.Object - hdr.SetOwnerID(account) - hdr.SetContainerID(containerID) - - writer, err := c.ObjectPutInit(ctx, hdr, signer, prm) - require.NoError(t, err) - - _, err = writer.Write(randBytes(10)) - require.NoError(t, err) - - err = writer.Close() - require.NoError(t, err) - - require.Equal(t, 2, collector.methods[stat.MethodObjectPut].requests) -} - -func TestClientStatistic_ObjectDelete(t *testing.T) { - signer := test.RandomSignerRFC6979(t) - ctx := context.Background() - c := newClient(t, nil) - - rpcAPIDeleteObject = func(_ *client.Client, _ *v2object.DeleteRequest, _ ...client.CallOption) (*v2object.DeleteResponse, error) { - var resp v2object.DeleteResponse - var meta session.ResponseMetaHeader - var body v2object.DeleteResponseBody - var addr refs.Address - var objID refs.ObjectID - var contID = randRefsContainerID() - - objID.SetValue(randBytes(32)) - - addr.SetContainerID(contID) - addr.SetObjectID(&objID) - - body.SetTombstone(&addr) - - resp.SetBody(&body) - resp.SetMetaHeader(&meta) - - if err := signServiceMessage(signer, &resp, nil); err != nil { - panic(fmt.Sprintf("sign response: %v", err)) - } - - return &resp, nil - } - - containerID := *randContainerID() - objectID := oid.ID{} - - collector := newCollector() - c.prm.statisticCallback = collector.Collect - - var prm PrmObjectDelete - - _, err := c.ObjectDelete(ctx, 
containerID, objectID, signer, prm) - require.NoError(t, err) - - require.Equal(t, 1, collector.methods[stat.MethodObjectDelete].requests) -} - -func TestClientStatistic_ObjectGet(t *testing.T) { - t.Skip("need changes to api-go, to set `r client.MessageReader` in rpcapi.GetResponseReader") - - signer := test.RandomSignerRFC6979(t) - ctx := context.Background() - c := newClient(t, nil) - - rpcAPIGetObject = func(_ *client.Client, _ *v2object.GetRequest, _ ...client.CallOption) (*rpcapi.GetResponseReader, error) { - var resp rpcapi.GetResponseReader - - // todo: fill - - return &resp, nil - } - - containerID := *randContainerID() - objectID := oid.ID{} - - collector := newCollector() - c.prm.statisticCallback = collector.Collect - - var prm PrmObjectGet - - _, reader, err := c.ObjectGetInit(ctx, containerID, objectID, signer, prm) - require.NoError(t, err) - - buff := make([]byte, 32) - _, err = reader.Read(buff) - require.NoError(t, err) - - require.Equal(t, 2, collector.methods[stat.MethodObjectGet].requests) -} - -func TestClientStatistic_ObjectHead(t *testing.T) { - signer := test.RandomSignerRFC6979(t) - ctx := context.Background() - c := newClient(t, nil) - - rpcAPIHeadObject = func(_ *client.Client, _ *v2object.HeadRequest, _ ...client.CallOption) (*v2object.HeadResponse, error) { - var resp v2object.HeadResponse - var meta session.ResponseMetaHeader - var body v2object.HeadResponseBody - var headerPart v2object.HeaderWithSignature - - body.SetHeaderPart(&headerPart) - - resp.SetBody(&body) - resp.SetMetaHeader(&meta) - - if err := signServiceMessage(signer, &resp, nil); err != nil { - panic(fmt.Sprintf("sign response: %v", err)) - } - - return &resp, nil - } - - containerID := *randContainerID() - objectID := oid.ID{} - - collector := newCollector() - c.prm.statisticCallback = collector.Collect - - var prm PrmObjectHead - - _, err := c.ObjectHead(ctx, containerID, objectID, signer, prm) - require.NoError(t, err) - - require.Equal(t, 1, 
collector.methods[stat.MethodObjectHead].requests) -} - -func TestClientStatistic_ObjectRange(t *testing.T) { - t.Skip("need changes to api-go, to set `r client.MessageReader` in rpcapi.ObjectRangeResponseReader") - - signer := test.RandomSignerRFC6979(t) - ctx := context.Background() - c := newClient(t, nil) - - rpcAPIGetObjectRange = func(_ *client.Client, _ *v2object.GetRangeRequest, _ ...client.CallOption) (*rpcapi.ObjectRangeResponseReader, error) { - var resp rpcapi.ObjectRangeResponseReader - - // todo: fill - - return &resp, nil - } - - containerID := *randContainerID() - objectID := oid.ID{} - - collector := newCollector() - c.prm.statisticCallback = collector.Collect - - var prm PrmObjectRange - - reader, err := c.ObjectRangeInit(ctx, containerID, objectID, 0, 1, signer, prm) - require.NoError(t, err) - - buff := make([]byte, 32) - _, err = reader.Read(buff) - require.NoError(t, err) - - require.Equal(t, 2, collector.methods[stat.MethodObjectRange].requests) -} - -func TestClientStatistic_ObjectHash(t *testing.T) { - signer := test.RandomSignerRFC6979(t) - ctx := context.Background() - c := newClient(t, nil) - - rpcAPIHashObjectRange = func(_ *client.Client, _ *v2object.GetRangeHashRequest, _ ...client.CallOption) (*v2object.GetRangeHashResponse, error) { - var resp v2object.GetRangeHashResponse - var meta session.ResponseMetaHeader - var body v2object.GetRangeHashResponseBody - - body.SetHashList([][]byte{ - randBytes(4), - }) - - resp.SetBody(&body) - resp.SetMetaHeader(&meta) - - if err := signServiceMessage(signer, &resp, nil); err != nil { - panic(fmt.Sprintf("sign response: %v", err)) - } - - return &resp, nil - } - - containerID := *randContainerID() - objectID := oid.ID{} - - collector := newCollector() - c.prm.statisticCallback = collector.Collect - - var prm PrmObjectHash - prm.SetRangeList(0, 2) - - _, err := c.ObjectHash(ctx, containerID, objectID, signer, prm) - require.NoError(t, err) - - require.Equal(t, 1, 
collector.methods[stat.MethodObjectHash].requests) -} - -func TestClientStatistic_ObjectSearch(t *testing.T) { - t.Skip("need changes to api-go, to set `r client.MessageReader` in rpcapi.SearchResponseReader") - - signer := test.RandomSignerRFC6979(t) - ctx := context.Background() - c := newClient(t, nil) - - rpcAPISearchObjects = func(_ *client.Client, _ *v2object.SearchRequest, _ ...client.CallOption) (*rpcapi.SearchResponseReader, error) { - var resp rpcapi.SearchResponseReader - - // todo: fill - - return &resp, nil - } - - containerID := *randContainerID() - - collector := newCollector() - c.prm.statisticCallback = collector.Collect - - var prm PrmObjectSearch - - reader, err := c.ObjectSearchInit(ctx, containerID, signer, prm) - require.NoError(t, err) - - iterator := func(oid.ID) bool { - return false - } - - err = reader.Iterate(iterator) - require.NoError(t, err) - - require.Equal(t, 2, collector.methods[stat.MethodObjectSearch].requests) -} - -func TestClientStatistic_AnnounceIntermediateTrust(t *testing.T) { - signer := test.RandomSignerRFC6979(t) - ctx := context.Background() - c := newClient(t, nil) - - rpcAPIAnnounceIntermediateResult = func(_ *client.Client, _ *reputation.AnnounceIntermediateResultRequest, _ ...client.CallOption) (*reputation.AnnounceIntermediateResultResponse, error) { - var resp reputation.AnnounceIntermediateResultResponse - var meta session.ResponseMetaHeader - var body reputation.AnnounceIntermediateResultResponseBody - - resp.SetBody(&body) - resp.SetMetaHeader(&meta) - - if err := signServiceMessage(signer, &resp, nil); err != nil { - panic(fmt.Sprintf("sign response: %v", err)) - } - - return &resp, nil - } - - collector := newCollector() - c.prm.statisticCallback = collector.Collect - - var trust reputation2.PeerToPeerTrust - var prm PrmAnnounceIntermediateTrust - - err := c.AnnounceIntermediateTrust(ctx, 1, trust, prm) - require.NoError(t, err) - - require.Equal(t, 1, 
collector.methods[stat.MethodAnnounceIntermediateTrust].requests) -} - -func TestClientStatistic_MethodAnnounceLocalTrust(t *testing.T) { - signer := test.RandomSignerRFC6979(t) - ctx := context.Background() - c := newClient(t, nil) - - rpcAPIAnnounceLocalTrust = func(_ *client.Client, _ *reputation.AnnounceLocalTrustRequest, _ ...client.CallOption) (*reputation.AnnounceLocalTrustResponse, error) { - var resp reputation.AnnounceLocalTrustResponse - var meta session.ResponseMetaHeader - var body reputation.AnnounceLocalTrustResponseBody - - resp.SetBody(&body) - resp.SetMetaHeader(&meta) - - if err := signServiceMessage(signer, &resp, nil); err != nil { - panic(fmt.Sprintf("sign response: %v", err)) - } - - return &resp, nil - } - - collector := newCollector() - c.prm.statisticCallback = collector.Collect - - var peer reputation2.PeerID - var trust reputation2.Trust - trust.SetPeer(peer) - - var prm PrmAnnounceLocalTrust - - err := c.AnnounceLocalTrust(ctx, 1, []reputation2.Trust{trust}, prm) - require.NoError(t, err) - - require.Equal(t, 1, collector.methods[stat.MethodAnnounceLocalTrust].requests) -} +// type ( +// methodStatistic struct { +// requests int +// errors int +// duration time.Duration +// } +// +// testStatCollector struct { +// methods map[stat.Method]*methodStatistic +// } +// ) +// +// func newCollector() *testStatCollector { +// c := testStatCollector{ +// methods: make(map[stat.Method]*methodStatistic), +// } +// +// for i := stat.MethodBalanceGet; i < stat.MethodLast; i++ { +// c.methods[i] = &methodStatistic{} +// } +// +// return &c +// } +// +// func (c *testStatCollector) Collect(_ []byte, _ string, method stat.Method, duration time.Duration, err error) { +// data, ok := c.methods[method] +// if ok { +// data.duration += duration +// if duration > 0 { +// data.requests++ +// } +// +// if err != nil { +// data.errors++ +// } +// } +// } +// +// func randBytes(l int) []byte { +// r := make([]byte, l) +// _, _ = rand.Read(r) +// +// return r 
+// } +// +// func randRefsContainerID() *refs.ContainerID { +// var id refs.ContainerID +// id.SetValue(randBytes(sha256.Size)) +// return &id +// } +// +// func randContainerID() *cid.ID { +// var refID refs.ContainerID +// refID.SetValue(randBytes(sha256.Size)) +// +// var id cid.ID +// _ = id.ReadFromV2(refID) +// +// return &id +// } +// +// func randAccount(signer user.Signer) *user.ID { +// u := signer.UserID() +// +// return &u +// } +// +// func randOwner(signer user.Signer) *refs.OwnerID { +// acc := randAccount(signer) +// +// var u refs.OwnerID +// acc.WriteToV2(&u) +// +// return &u +// } +// +// func prepareContainer(accountID user.ID) container.Container { +// cont := container.Container{} +// cont.Init() +// cont.SetOwner(accountID) +// cont.SetBasicACL(acl.PublicRW) +// +// cont.SetName(strconv.FormatInt(time.Now().UnixNano(), 16)) +// cont.SetCreationTime(time.Now().UTC()) +// +// var pp netmap.PlacementPolicy +// var rd netmap.ReplicaDescriptor +// rd.SetNumberOfObjects(1) +// +// pp.SetContainerBackupFactor(1) +// pp.SetReplicas([]netmap.ReplicaDescriptor{rd}) +// cont.SetPlacementPolicy(pp) +// +// return cont +// } +// +// func testEaclTable(containerID cid.ID) eacl.Table { +// var table eacl.Table +// table.SetCID(containerID) +// +// r := eacl.NewRecord() +// r.SetOperation(eacl.OperationPut) +// r.SetAction(eacl.ActionAllow) +// +// var target eacl.Target +// target.SetRole(eacl.RoleOthers) +// r.SetTargets(target) +// table.AddRecord(r) +// +// return table +// } +// +// func TestClientStatistic_AccountBalance(t *testing.T) { +// signer := test.RandomSignerRFC6979(t) +// ctx := context.Background() +// c := newClient(t, nil) +// +// rpcAPIBalance = func(_ *client.Client, _ *accounting.BalanceRequest, _ ...client.CallOption) (*accounting.BalanceResponse, error) { +// var resp accounting.BalanceResponse +// var meta session.ResponseMetaHeader +// var balance accounting.Decimal +// var body accounting.BalanceResponseBody +// +// 
body.SetBalance(&balance) +// +// resp.SetBody(&body) +// resp.SetMetaHeader(&meta) +// +// err := signServiceMessage(signer, &resp, nil) +// if err != nil { +// panic(fmt.Sprintf("sign response: %v", err)) +// } +// +// return &resp, nil +// } +// +// collector := newCollector() +// c.prm.statisticCallback = collector.Collect +// +// var prm PrmBalanceGet +// prm.SetAccount(*randAccount(signer)) +// _, err := c.BalanceGet(ctx, prm) +// require.NoError(t, err) +// +// require.Equal(t, 1, collector.methods[stat.MethodBalanceGet].requests) +// } +// +// func TestClientStatistic_ContainerPut(t *testing.T) { +// signer := test.RandomSignerRFC6979(t) +// ctx := context.Background() +// c := newClient(t, nil) +// +// rpcAPIPutContainer = func(_ *client.Client, _ *v2container.PutRequest, _ ...client.CallOption) (*v2container.PutResponse, error) { +// var resp v2container.PutResponse +// var meta session.ResponseMetaHeader +// var body v2container.PutResponseBody +// +// body.SetContainerID(randRefsContainerID()) +// +// resp.SetBody(&body) +// resp.SetMetaHeader(&meta) +// +// err := signServiceMessage(signer, &resp, nil) +// if err != nil { +// panic(fmt.Sprintf("sign response: %v", err)) +// } +// +// return &resp, nil +// } +// +// cont := prepareContainer(*randAccount(signer)) +// +// collector := newCollector() +// c.prm.statisticCallback = collector.Collect +// +// var prm PrmContainerPut +// _, err := c.ContainerPut(ctx, cont, signer, prm) +// require.NoError(t, err) +// +// require.Equal(t, 1, collector.methods[stat.MethodContainerPut].requests) +// } +// +// func TestClientStatistic_ContainerGet(t *testing.T) { +// signer := test.RandomSignerRFC6979(t) +// ctx := context.Background() +// c := newClient(t, nil) +// +// rpcAPIGetContainer = func(_ *client.Client, _ *v2container.GetRequest, _ ...client.CallOption) (*v2container.GetResponse, error) { +// var cont v2container.Container +// var ver refs.Version +// var placementPolicyV2 netmapv2.PlacementPolicy +// var 
replicas []netmapv2.Replica +// var resp v2container.GetResponse +// var meta session.ResponseMetaHeader +// +// cont.SetOwnerID(randOwner(signer)) +// cont.SetVersion(&ver) +// +// nonce, err := uuid.New().MarshalBinary() +// require.NoError(t, err) +// cont.SetNonce(nonce) +// +// replica := netmapv2.Replica{} +// replica.SetCount(1) +// replicas = append(replicas, replica) +// placementPolicyV2.SetReplicas(replicas) +// cont.SetPlacementPolicy(&placementPolicyV2) +// +// body := v2container.GetResponseBody{} +// body.SetContainer(&cont) +// +// resp.SetBody(&body) +// resp.SetMetaHeader(&meta) +// +// if err = signServiceMessage(signer, &resp, nil); err != nil { +// panic(fmt.Sprintf("sign response: %v", err)) +// } +// +// return &resp, nil +// } +// +// collector := newCollector() +// c.prm.statisticCallback = collector.Collect +// +// var prm PrmContainerGet +// _, err := c.ContainerGet(ctx, cid.ID{}, prm) +// require.NoError(t, err) +// +// require.Equal(t, 1, collector.methods[stat.MethodContainerGet].requests) +// } +// +// func TestClientStatistic_ContainerList(t *testing.T) { +// signer := test.RandomSignerRFC6979(t) +// ctx := context.Background() +// c := newClient(t, nil) +// +// rpcAPIListContainers = func(_ *client.Client, _ *v2container.ListRequest, _ ...client.CallOption) (*v2container.ListResponse, error) { +// var resp v2container.ListResponse +// var meta session.ResponseMetaHeader +// var body v2container.ListResponseBody +// +// resp.SetBody(&body) +// resp.SetMetaHeader(&meta) +// +// if err := signServiceMessage(signer, &resp, nil); err != nil { +// panic(fmt.Sprintf("sign response: %v", err)) +// } +// +// return &resp, nil +// } +// +// collector := newCollector() +// c.prm.statisticCallback = collector.Collect +// +// var prm PrmContainerList +// _, err := c.ContainerList(ctx, *randAccount(signer), prm) +// require.NoError(t, err) +// +// require.Equal(t, 1, collector.methods[stat.MethodContainerList].requests) +// } +// +// func 
TestClientStatistic_ContainerDelete(t *testing.T) { +// signer := test.RandomSignerRFC6979(t) +// ctx := context.Background() +// c := newClient(t, nil) +// +// rpcAPIDeleteContainer = func(_ *client.Client, _ *v2container.DeleteRequest, _ ...client.CallOption) (*v2container.PutResponse, error) { +// var resp v2container.PutResponse +// var meta session.ResponseMetaHeader +// var body v2container.PutResponseBody +// +// resp.SetBody(&body) +// resp.SetMetaHeader(&meta) +// +// if err := signServiceMessage(signer, &resp, nil); err != nil { +// panic(fmt.Sprintf("sign response: %v", err)) +// } +// +// return &resp, nil +// } +// +// collector := newCollector() +// c.prm.statisticCallback = collector.Collect +// +// var prm PrmContainerDelete +// err := c.ContainerDelete(ctx, cid.ID{}, signer, prm) +// require.NoError(t, err) +// +// require.Equal(t, 1, collector.methods[stat.MethodContainerDelete].requests) +// } +// +// func TestClientStatistic_ContainerEacl(t *testing.T) { +// signer := test.RandomSignerRFC6979(t) +// ctx := context.Background() +// c := newClient(t, nil) +// +// rpcAPIGetEACL = func(_ *client.Client, _ *v2container.GetExtendedACLRequest, _ ...client.CallOption) (*v2container.GetExtendedACLResponse, error) { +// var resp v2container.GetExtendedACLResponse +// var meta session.ResponseMetaHeader +// var aclTable v2acl.Table +// var body v2container.GetExtendedACLResponseBody +// +// body.SetEACL(&aclTable) +// +// resp.SetBody(&body) +// resp.SetMetaHeader(&meta) +// +// if err := signServiceMessage(signer, &resp, nil); err != nil { +// panic(fmt.Sprintf("sign response: %v", err)) +// } +// +// return &resp, nil +// } +// +// collector := newCollector() +// c.prm.statisticCallback = collector.Collect +// +// var prm PrmContainerEACL +// _, err := c.ContainerEACL(ctx, cid.ID{}, prm) +// require.NoError(t, err) +// +// require.Equal(t, 1, collector.methods[stat.MethodContainerEACL].requests) +// } +// +// func TestClientStatistic_ContainerSetEacl(t 
*testing.T) { +// signer := test.RandomSignerRFC6979(t) +// ctx := context.Background() +// c := newClient(t, nil) +// +// rpcAPISetEACL = func(_ *client.Client, _ *v2container.SetExtendedACLRequest, _ ...client.CallOption) (*v2container.PutResponse, error) { +// var resp v2container.PutResponse +// var meta session.ResponseMetaHeader +// var body v2container.PutResponseBody +// +// resp.SetBody(&body) +// resp.SetMetaHeader(&meta) +// +// if err := signServiceMessage(signer, &resp, nil); err != nil { +// panic(fmt.Sprintf("sign response: %v", err)) +// } +// +// return &resp, nil +// } +// +// collector := newCollector() +// c.prm.statisticCallback = collector.Collect +// +// var prm PrmContainerSetEACL +// table := testEaclTable(cid.ID{}) +// err := c.ContainerSetEACL(ctx, table, signer, prm) +// require.NoError(t, err) +// +// require.Equal(t, 1, collector.methods[stat.MethodContainerSetEACL].requests) +// } +// +// func TestClientStatistic_ContainerAnnounceUsedSpace(t *testing.T) { +// signer := test.RandomSignerRFC6979(t) +// ctx := context.Background() +// c := newClient(t, nil) +// +// rpcAPIAnnounceUsedSpace = func(_ *client.Client, _ *v2container.AnnounceUsedSpaceRequest, _ ...client.CallOption) (*v2container.PutResponse, error) { +// var resp v2container.PutResponse +// var meta session.ResponseMetaHeader +// var body v2container.PutResponseBody +// +// resp.SetBody(&body) +// resp.SetMetaHeader(&meta) +// +// if err := signServiceMessage(signer, &resp, nil); err != nil { +// panic(fmt.Sprintf("sign response: %v", err)) +// } +// +// return &resp, nil +// } +// +// collector := newCollector() +// c.prm.statisticCallback = collector.Collect +// +// estimation := container.SizeEstimation{} +// estimation.SetContainer(*randContainerID()) +// estimation.SetValue(mathRand.Uint64()) +// estimation.SetEpoch(mathRand.Uint64()) +// +// var prm PrmAnnounceSpace +// err := c.ContainerAnnounceUsedSpace(ctx, []container.SizeEstimation{estimation}, prm) +// 
require.NoError(t, err) +// +// require.Equal(t, 1, collector.methods[stat.MethodContainerAnnounceUsedSpace].requests) +// } +// +// func TestClientStatistic_ContainerSyncContainerWithNetwork(t *testing.T) { +// signer := test.RandomSignerRFC6979(t) +// ctx := context.Background() +// c := newClient(t, nil) +// +// rpcAPINetworkInfo = func(_ *client.Client, _ *netmapv2.NetworkInfoRequest, _ ...client.CallOption) (*netmapv2.NetworkInfoResponse, error) { +// var resp netmapv2.NetworkInfoResponse +// var meta session.ResponseMetaHeader +// var netInfo netmapv2.NetworkInfo +// var netConfig netmapv2.NetworkConfig +// var p1 netmapv2.NetworkParameter +// +// p1.SetKey(randBytes(10)) +// p1.SetValue(randBytes(10)) +// +// netConfig.SetParameters(p1) +// netInfo.SetNetworkConfig(&netConfig) +// +// body := netmapv2.NetworkInfoResponseBody{} +// body.SetNetworkInfo(&netInfo) +// +// resp.SetBody(&body) +// resp.SetMetaHeader(&meta) +// +// if err := signServiceMessage(signer, &resp, nil); err != nil { +// panic(fmt.Sprintf("sign response: %v", err)) +// } +// +// return &resp, nil +// } +// +// collector := newCollector() +// c.prm.statisticCallback = collector.Collect +// +// cont := prepareContainer(*randAccount(signer)) +// +// err := SyncContainerWithNetwork(ctx, &cont, c) +// require.NoError(t, err) +// +// require.Equal(t, 1, collector.methods[stat.MethodNetworkInfo].requests) +// } +// +// func TestClientStatistic_ContainerEndpointInfo(t *testing.T) { +// signer := test.RandomSignerRFC6979(t) +// ctx := context.Background() +// c := newClient(t, nil) +// +// rpcAPILocalNodeInfo = func(_ *client.Client, _ *netmapv2.LocalNodeInfoRequest, _ ...client.CallOption) (*netmapv2.LocalNodeInfoResponse, error) { +// var resp netmapv2.LocalNodeInfoResponse +// var meta session.ResponseMetaHeader +// var ver refs.Version +// var nodeInfo netmapv2.NodeInfo +// +// nodeInfo.SetPublicKey(neofscrypto.PublicKeyBytes(signer.Public())) +// 
nodeInfo.SetAddresses("https://some-endpont.com") +// +// body := netmapv2.LocalNodeInfoResponseBody{} +// body.SetVersion(&ver) +// body.SetNodeInfo(&nodeInfo) +// +// resp.SetBody(&body) +// resp.SetMetaHeader(&meta) +// +// if err := signServiceMessage(signer, &resp, nil); err != nil { +// panic(fmt.Sprintf("sign response: %v", err)) +// } +// +// return &resp, nil +// } +// +// collector := newCollector() +// c.prm.statisticCallback = collector.Collect +// +// _, err := c.EndpointInfo(ctx, PrmEndpointInfo{}) +// require.NoError(t, err) +// +// require.Equal(t, 1, collector.methods[stat.MethodEndpointInfo].requests) +// } +// +// func TestClientStatistic_ContainerNetMapSnapshot(t *testing.T) { +// signer := test.RandomSignerRFC6979(t) +// ctx := context.Background() +// c := newClient(t, nil) +// +// rpcAPINetMapSnapshot = func(_ *client.Client, _ *netmapv2.SnapshotRequest, _ ...client.CallOption) (*netmapv2.SnapshotResponse, error) { +// var resp netmapv2.SnapshotResponse +// var meta session.ResponseMetaHeader +// var netMap netmapv2.NetMap +// +// body := netmapv2.SnapshotResponseBody{} +// body.SetNetMap(&netMap) +// +// resp.SetBody(&body) +// resp.SetMetaHeader(&meta) +// +// if err := signServiceMessage(signer, &resp, nil); err != nil { +// panic(fmt.Sprintf("sign response: %v", err)) +// } +// +// return &resp, nil +// } +// +// collector := newCollector() +// c.prm.statisticCallback = collector.Collect +// c.setNeoFSAPIServer((*coreServer)(&c.c)) +// +// _, err := c.NetMapSnapshot(ctx, PrmNetMapSnapshot{}) +// require.NoError(t, err) +// +// require.Equal(t, 1, collector.methods[stat.MethodNetMapSnapshot].requests) +// } +// +// func TestClientStatistic_CreateSession(t *testing.T) { +// signer := test.RandomSignerRFC6979(t) +// ctx := context.Background() +// c := newClient(t, nil) +// +// rpcAPICreateSession = func(_ *client.Client, _ *session.CreateRequest, _ ...client.CallOption) (*session.CreateResponse, error) { +// var resp session.CreateResponse 
+// var meta session.ResponseMetaHeader +// +// body := session.CreateResponseBody{} +// body.SetID(randBytes(10)) +// +// body.SetSessionKey(neofscrypto.PublicKeyBytes(signer.Public())) +// +// resp.SetBody(&body) +// resp.SetMetaHeader(&meta) +// +// if err := signServiceMessage(signer, &resp, nil); err != nil { +// panic(fmt.Sprintf("sign response: %v", err)) +// } +// +// return &resp, nil +// } +// +// collector := newCollector() +// c.prm.statisticCallback = collector.Collect +// c.setNeoFSAPIServer((*coreServer)(&c.c)) +// +// var prm PrmSessionCreate +// +// _, err := c.SessionCreate(ctx, signer, prm) +// require.NoError(t, err) +// +// require.Equal(t, 1, collector.methods[stat.MethodSessionCreate].requests) +// } +// +// func TestClientStatistic_ObjectPut(t *testing.T) { +// t.Skip("need changes to api-go, to set `wc client.MessageWriterCloser` in rpcapi.PutRequestWriter") +// +// signer := test.RandomSignerRFC6979(t) +// ctx := context.Background() +// c := newClient(t, nil) +// +// rpcAPIPutObject = func(_ *client.Client, _ *v2object.PutResponse, _ ...client.CallOption) (objectWriter, error) { +// var resp rpcapi.PutRequestWriter +// +// return &resp, nil +// } +// +// containerID := *randContainerID() +// account := randAccount(signer) +// +// collector := newCollector() +// c.prm.statisticCallback = collector.Collect +// c.setNeoFSAPIServer((*coreServer)(&c.c)) +// +// var tokenSession session2.Object +// tokenSession.SetID(uuid.New()) +// tokenSession.SetExp(1) +// tokenSession.BindContainer(containerID) +// tokenSession.ForVerb(session2.VerbObjectPut) +// tokenSession.SetAuthKey(signer.Public()) +// tokenSession.SetIssuer(*account) +// +// err := tokenSession.Sign(signer) +// require.NoError(t, err) +// +// var prm PrmObjectPutInit +// prm.WithinSession(tokenSession) +// +// var hdr object.Object +// hdr.SetOwnerID(account) +// hdr.SetContainerID(containerID) +// +// writer, err := c.ObjectPutInit(ctx, hdr, signer, prm) +// require.NoError(t, err) 
+// +// _, err = writer.Write(randBytes(10)) +// require.NoError(t, err) +// +// err = writer.Close() +// require.NoError(t, err) +// +// require.Equal(t, 2, collector.methods[stat.MethodObjectPut].requests) +// } +// +// func TestClientStatistic_ObjectDelete(t *testing.T) { +// signer := test.RandomSignerRFC6979(t) +// ctx := context.Background() +// c := newClient(t, nil) +// +// rpcAPIDeleteObject = func(_ *client.Client, _ *v2object.DeleteRequest, _ ...client.CallOption) (*v2object.DeleteResponse, error) { +// var resp v2object.DeleteResponse +// var meta session.ResponseMetaHeader +// var body v2object.DeleteResponseBody +// var addr refs.Address +// var objID refs.ObjectID +// var contID = randRefsContainerID() +// +// objID.SetValue(randBytes(32)) +// +// addr.SetContainerID(contID) +// addr.SetObjectID(&objID) +// +// body.SetTombstone(&addr) +// +// resp.SetBody(&body) +// resp.SetMetaHeader(&meta) +// +// if err := signServiceMessage(signer, &resp, nil); err != nil { +// panic(fmt.Sprintf("sign response: %v", err)) +// } +// +// return &resp, nil +// } +// +// containerID := *randContainerID() +// objectID := oid.ID{} +// +// collector := newCollector() +// c.prm.statisticCallback = collector.Collect +// +// var prm PrmObjectDelete +// +// _, err := c.ObjectDelete(ctx, containerID, objectID, signer, prm) +// require.NoError(t, err) +// +// require.Equal(t, 1, collector.methods[stat.MethodObjectDelete].requests) +// } +// +// func TestClientStatistic_ObjectGet(t *testing.T) { +// t.Skip("need changes to api-go, to set `r client.MessageReader` in rpcapi.GetResponseReader") +// +// signer := test.RandomSignerRFC6979(t) +// ctx := context.Background() +// c := newClient(t, nil) +// +// rpcAPIGetObject = func(_ *client.Client, _ *v2object.GetRequest, _ ...client.CallOption) (*rpcapi.GetResponseReader, error) { +// var resp rpcapi.GetResponseReader +// +// // todo: fill +// +// return &resp, nil +// } +// +// containerID := *randContainerID() +// objectID := 
oid.ID{} +// +// collector := newCollector() +// c.prm.statisticCallback = collector.Collect +// +// var prm PrmObjectGet +// +// _, reader, err := c.ObjectGetInit(ctx, containerID, objectID, signer, prm) +// require.NoError(t, err) +// +// buff := make([]byte, 32) +// _, err = reader.Read(buff) +// require.NoError(t, err) +// +// require.Equal(t, 2, collector.methods[stat.MethodObjectGet].requests) +// } +// +// func TestClientStatistic_ObjectHead(t *testing.T) { +// signer := test.RandomSignerRFC6979(t) +// ctx := context.Background() +// c := newClient(t, nil) +// +// rpcAPIHeadObject = func(_ *client.Client, _ *v2object.HeadRequest, _ ...client.CallOption) (*v2object.HeadResponse, error) { +// var resp v2object.HeadResponse +// var meta session.ResponseMetaHeader +// var body v2object.HeadResponseBody +// var headerPart v2object.HeaderWithSignature +// +// body.SetHeaderPart(&headerPart) +// +// resp.SetBody(&body) +// resp.SetMetaHeader(&meta) +// +// if err := signServiceMessage(signer, &resp, nil); err != nil { +// panic(fmt.Sprintf("sign response: %v", err)) +// } +// +// return &resp, nil +// } +// +// containerID := *randContainerID() +// objectID := oid.ID{} +// +// collector := newCollector() +// c.prm.statisticCallback = collector.Collect +// +// var prm PrmObjectHead +// +// _, err := c.ObjectHead(ctx, containerID, objectID, signer, prm) +// require.NoError(t, err) +// +// require.Equal(t, 1, collector.methods[stat.MethodObjectHead].requests) +// } +// +// func TestClientStatistic_ObjectRange(t *testing.T) { +// t.Skip("need changes to api-go, to set `r client.MessageReader` in rpcapi.ObjectRangeResponseReader") +// +// signer := test.RandomSignerRFC6979(t) +// ctx := context.Background() +// c := newClient(t, nil) +// +// rpcAPIGetObjectRange = func(_ *client.Client, _ *v2object.GetRangeRequest, _ ...client.CallOption) (*rpcapi.ObjectRangeResponseReader, error) { +// var resp rpcapi.ObjectRangeResponseReader +// +// // todo: fill +// +// return 
&resp, nil +// } +// +// containerID := *randContainerID() +// objectID := oid.ID{} +// +// collector := newCollector() +// c.prm.statisticCallback = collector.Collect +// +// var prm PrmObjectRange +// +// reader, err := c.ObjectRangeInit(ctx, containerID, objectID, 0, 1, signer, prm) +// require.NoError(t, err) +// +// buff := make([]byte, 32) +// _, err = reader.Read(buff) +// require.NoError(t, err) +// +// require.Equal(t, 2, collector.methods[stat.MethodObjectRange].requests) +// } +// +// func TestClientStatistic_ObjectHash(t *testing.T) { +// signer := test.RandomSignerRFC6979(t) +// ctx := context.Background() +// c := newClient(t, nil) +// +// rpcAPIHashObjectRange = func(_ *client.Client, _ *v2object.GetRangeHashRequest, _ ...client.CallOption) (*v2object.GetRangeHashResponse, error) { +// var resp v2object.GetRangeHashResponse +// var meta session.ResponseMetaHeader +// var body v2object.GetRangeHashResponseBody +// +// body.SetHashList([][]byte{ +// randBytes(4), +// }) +// +// resp.SetBody(&body) +// resp.SetMetaHeader(&meta) +// +// if err := signServiceMessage(signer, &resp, nil); err != nil { +// panic(fmt.Sprintf("sign response: %v", err)) +// } +// +// return &resp, nil +// } +// +// containerID := *randContainerID() +// objectID := oid.ID{} +// +// collector := newCollector() +// c.prm.statisticCallback = collector.Collect +// +// var prm PrmObjectHash +// prm.SetRangeList(0, 2) +// +// _, err := c.ObjectHash(ctx, containerID, objectID, signer, prm) +// require.NoError(t, err) +// +// require.Equal(t, 1, collector.methods[stat.MethodObjectHash].requests) +// } +// +// func TestClientStatistic_ObjectSearch(t *testing.T) { +// t.Skip("need changes to api-go, to set `r client.MessageReader` in rpcapi.SearchResponseReader") +// +// signer := test.RandomSignerRFC6979(t) +// ctx := context.Background() +// c := newClient(t, nil) +// +// rpcAPISearchObjects = func(_ *client.Client, _ *v2object.SearchRequest, _ ...client.CallOption) 
(*rpcapi.SearchResponseReader, error) { +// var resp rpcapi.SearchResponseReader +// +// // todo: fill +// +// return &resp, nil +// } +// +// containerID := *randContainerID() +// +// collector := newCollector() +// c.prm.statisticCallback = collector.Collect +// +// var prm PrmObjectSearch +// +// reader, err := c.ObjectSearchInit(ctx, containerID, signer, prm) +// require.NoError(t, err) +// +// iterator := func(oid.ID) bool { +// return false +// } +// +// err = reader.Iterate(iterator) +// require.NoError(t, err) +// +// require.Equal(t, 2, collector.methods[stat.MethodObjectSearch].requests) +// } +// +// func TestClientStatistic_AnnounceIntermediateTrust(t *testing.T) { +// signer := test.RandomSignerRFC6979(t) +// ctx := context.Background() +// c := newClient(t, nil) +// +// rpcAPIAnnounceIntermediateResult = func(_ *client.Client, _ *reputation.AnnounceIntermediateResultRequest, _ ...client.CallOption) (*reputation.AnnounceIntermediateResultResponse, error) { +// var resp reputation.AnnounceIntermediateResultResponse +// var meta session.ResponseMetaHeader +// var body reputation.AnnounceIntermediateResultResponseBody +// +// resp.SetBody(&body) +// resp.SetMetaHeader(&meta) +// +// if err := signServiceMessage(signer, &resp, nil); err != nil { +// panic(fmt.Sprintf("sign response: %v", err)) +// } +// +// return &resp, nil +// } +// +// collector := newCollector() +// c.prm.statisticCallback = collector.Collect +// +// var trust reputation2.PeerToPeerTrust +// var prm PrmAnnounceIntermediateTrust +// +// err := c.AnnounceIntermediateTrust(ctx, 1, trust, prm) +// require.NoError(t, err) +// +// require.Equal(t, 1, collector.methods[stat.MethodAnnounceIntermediateTrust].requests) +// } +// +// func TestClientStatistic_MethodAnnounceLocalTrust(t *testing.T) { +// signer := test.RandomSignerRFC6979(t) +// ctx := context.Background() +// c := newClient(t, nil) +// +// rpcAPIAnnounceLocalTrust = func(_ *client.Client, _ *reputation.AnnounceLocalTrustRequest, _ 
...client.CallOption) (*reputation.AnnounceLocalTrustResponse, error) { +// var resp reputation.AnnounceLocalTrustResponse +// var meta session.ResponseMetaHeader +// var body reputation.AnnounceLocalTrustResponseBody +// +// resp.SetBody(&body) +// resp.SetMetaHeader(&meta) +// +// if err := signServiceMessage(signer, &resp, nil); err != nil { +// panic(fmt.Sprintf("sign response: %v", err)) +// } +// +// return &resp, nil +// } +// +// collector := newCollector() +// c.prm.statisticCallback = collector.Collect +// +// var peer reputation2.PeerID +// var trust reputation2.Trust +// trust.SetPeer(peer) +// +// var prm PrmAnnounceLocalTrust +// +// err := c.AnnounceLocalTrust(ctx, 1, []reputation2.Trust{trust}, prm) +// require.NoError(t, err) +// +// require.Equal(t, 1, collector.methods[stat.MethodAnnounceLocalTrust].requests) +// } diff --git a/client/container_test.go b/client/container_test.go index 4f370673b..aef9bf544 100644 --- a/client/container_test.go +++ b/client/container_test.go @@ -1,49 +1,2838 @@ package client import ( + "bytes" "context" + "errors" + "fmt" + "math/rand" + "net" "testing" + "time" + apiacl "github.com/nspcc-dev/neofs-sdk-go/api/acl" + apicontainer "github.com/nspcc-dev/neofs-sdk-go/api/container" + apinetmap "github.com/nspcc-dev/neofs-sdk-go/api/netmap" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + apisession "github.com/nspcc-dev/neofs-sdk-go/api/session" + "github.com/nspcc-dev/neofs-sdk-go/api/status" + apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" "github.com/nspcc-dev/neofs-sdk-go/container" cid "github.com/nspcc-dev/neofs-sdk-go/container/id" + cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" + containertest "github.com/nspcc-dev/neofs-sdk-go/container/test" + neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" + neofsecdsa "github.com/nspcc-dev/neofs-sdk-go/crypto/ecdsa" + neofscryptotest "github.com/nspcc-dev/neofs-sdk-go/crypto/test" "github.com/nspcc-dev/neofs-sdk-go/eacl" + eacltest 
"github.com/nspcc-dev/neofs-sdk-go/eacl/test" + netmaptest "github.com/nspcc-dev/neofs-sdk-go/netmap/test" + "github.com/nspcc-dev/neofs-sdk-go/session" + sessiontest "github.com/nspcc-dev/neofs-sdk-go/session/test" + "github.com/nspcc-dev/neofs-sdk-go/stat" + "github.com/nspcc-dev/neofs-sdk-go/user" + usertest "github.com/nspcc-dev/neofs-sdk-go/user/test" + versiontest "github.com/nspcc-dev/neofs-sdk-go/version/test" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/test/bufconn" ) -func TestClient_Container(t *testing.T) { - c := newClient(t, nil) +type noOtherContainerCalls struct{} + +func (noOtherContainerCalls) Delete(context.Context, *apicontainer.DeleteRequest) (*apicontainer.DeleteResponse, error) { + panic("must not be called") +} + +func (noOtherContainerCalls) Get(context.Context, *apicontainer.GetRequest) (*apicontainer.GetResponse, error) { + panic("must not be called") +} + +func (noOtherContainerCalls) List(context.Context, *apicontainer.ListRequest) (*apicontainer.ListResponse, error) { + panic("must not be called") +} + +func (noOtherContainerCalls) SetExtendedACL(context.Context, *apicontainer.SetExtendedACLRequest) (*apicontainer.SetExtendedACLResponse, error) { + panic("must not be called") +} + +func (noOtherContainerCalls) GetExtendedACL(context.Context, *apicontainer.GetExtendedACLRequest) (*apicontainer.GetExtendedACLResponse, error) { + panic("must not be called") +} + +func (noOtherContainerCalls) AnnounceUsedSpace(context.Context, *apicontainer.AnnounceUsedSpaceRequest) (*apicontainer.AnnounceUsedSpaceResponse, error) { + panic("must not be called") +} + +func (noOtherContainerCalls) Put(context.Context, *apicontainer.PutRequest) (*apicontainer.PutResponse, error) { + panic("must not be called") +} + +type putContainerServer struct { + noOtherContainerCalls + // client + cnrBin []byte + creatorSigner neofscrypto.Signer + session *session.Container + // server 
+ sleepDur time.Duration + endpointInfoOnDialServer + id cid.ID + errTransport error + modifyResp func(*apicontainer.PutResponse) + corruptRespSig func(*apicontainer.PutResponse) +} + +func (x putContainerServer) Put(ctx context.Context, req *apicontainer.PutRequest) (*apicontainer.PutResponse, error) { + if x.sleepDur > 0 { + time.Sleep(x.sleepDur) + } + if x.errTransport != nil { + return nil, x.errTransport + } + var sts status.Status + resp := apicontainer.PutResponse{ + MetaHeader: &apisession.ResponseMetaHeader{Status: &sts, Epoch: x.epoch}, + } + var err error + var cnr container.Container + sigScheme := refs.SignatureScheme(x.creatorSigner.Scheme()) + creatorPubKey := neofscrypto.PublicKeyBytes(x.creatorSigner.Public()) + if ctx == nil { + sts.Code, sts.Message = status.InternalServerError, "nil context" + } else if req == nil { + sts.Code, sts.Message = status.InternalServerError, "nil request" + } else if err = neofscrypto.VerifyRequest(req, req.Body); err != nil { + sts.Code, sts.Message = status.SignatureVerificationFail, err.Error() + } else if req.VerifyHeader.BodySignature.Scheme != sigScheme || + !bytes.Equal(req.VerifyHeader.BodySignature.Key, creatorPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request body signature credentials" + } else if req.VerifyHeader.MetaSignature.Scheme != sigScheme || + !bytes.Equal(req.VerifyHeader.MetaSignature.Key, creatorPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request meta header signature credentials" + } else if req.VerifyHeader.OriginSignature.Scheme != sigScheme || + !bytes.Equal(req.VerifyHeader.OriginSignature.Key, creatorPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected origin request verification header signature credentials" + } else if req.Body == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: missing body" + } else if req.Body.Container == nil { + sts.Code, 
sts.Message = status.InternalServerError, "invalid request: invalid body: missing container" + } else if err = cnr.ReadFromV2(req.Body.Container); err != nil { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request: invalid body: invalid container field: %s", err) + } else if !bytes.Equal(cnr.Marshal(), x.cnrBin) { + sts.Code, sts.Message = status.InternalServerError, "[test] wrong container" + } else if req.Body.Signature == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: invalid body: missing container signature" + } else if !bytes.Equal(req.Body.Signature.Key, creatorPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] public key in request body differs with the creator's one" + } else if !x.creatorSigner.Public().Verify(x.cnrBin, req.Body.Signature.Sign) { + sts.Code, sts.Message = status.InternalServerError, "[test] wrong container signature" + } else if x.session != nil { + var sc session.Container + if req.MetaHeader == nil { + sts.Code, sts.Message = status.InternalServerError, "[test] missing request meta header" + } else if req.MetaHeader.SessionToken == nil { + sts.Code, sts.Message = status.InternalServerError, "[test] missing session token" + } else if err = sc.ReadFromV2(req.MetaHeader.SessionToken); err != nil { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request: invalid meta header: invalid session token: %v", err) + } else if !bytes.Equal(sc.Marshal(), x.session.Marshal()) { + sts.Code, sts.Message = status.InternalServerError, "[test] session token in request differs with the input one" + } + } else if req.MetaHeader != nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: meta header is set" + } + if sts.Code == 0 { + resp.MetaHeader.Status = nil + resp.Body = &apicontainer.PutResponse_Body{ContainerId: new(refs.ContainerID)} + x.id.WriteToV2(resp.Body.ContainerId) + } + if x.modifyResp != nil { + 
x.modifyResp(&resp) + } + resp.VerifyHeader, err = neofscrypto.SignResponse(x.serverSigner, &resp, resp.Body, nil) + if err != nil { + return nil, fmt.Errorf("sign response: %w", err) + } + if x.corruptRespSig != nil { + x.corruptRespSig(&resp) + } + return &resp, nil +} + +func TestClient_PutContainer(t *testing.T) { ctx := context.Background() + var srv putContainerServer + srv.sleepDur = 10 * time.Millisecond + srv.serverSigner = neofscryptotest.RandomSigner() + srv.latestVersion = versiontest.Version() + srv.nodeInfo = netmaptest.NodeInfo() + srv.nodeInfo.SetPublicKey(neofscrypto.PublicKeyBytes(srv.serverSigner.Public())) + cnr := containertest.Container() + srv.cnrBin = cnr.Marshal() + srv.creatorSigner = neofscryptotest.RandomSignerRFC6979() + srv.id = cidtest.ID() + _dial := func(t testing.TB, srv *putContainerServer, assertErr func(error), customizeOpts func(*Options)) (*Client, *bool) { + var opts Options + var handlerCalled bool + opts.SetAPIRequestResultHandler(func(nodeKey []byte, endpoint string, op stat.Method, dur time.Duration, err error) { + handlerCalled = true + require.Equal(t, srv.nodeInfo.PublicKey(), nodeKey) + require.Equal(t, "localhost:8080", endpoint) + require.Equal(t, stat.MethodContainerPut, op) + require.Greater(t, dur, srv.sleepDur) + assertErr(err) + }) + if customizeOpts != nil { + customizeOpts(&opts) + } + + c, err := New(anyValidURI, opts) + require.NoError(t, err) + + conn := bufconn.Listen(10 << 10) + gs := grpc.NewServer() + apinetmap.RegisterNetmapServiceServer(gs, srv) + apicontainer.RegisterContainerServiceServer(gs, srv) + go func() { _ = gs.Serve(conn) }() + t.Cleanup(gs.Stop) + + c.dial = func(ctx context.Context, _ string) (net.Conn, error) { return conn.DialContext(ctx) } + require.NoError(t, c.Dial(ctx)) - t.Run("missing signer", func(t *testing.T) { - tt := []struct { - name string - methodCall func() error - }{ - { - "put", - func() error { - _, err := c.ContainerPut(ctx, container.Container{}, nil, 
PrmContainerPut{}) - return err + return c, &handlerCalled + } + dial := func(t testing.TB, srv *putContainerServer, assertErr func(error)) (*Client, *bool) { + return _dial(t, srv, assertErr, nil) + } + t.Run("invalid signer", func(t *testing.T) { + c, err := New(anyValidURI, Options{}) + require.NoError(t, err) + _, err = c.PutContainer(ctx, cnr, nil, PutContainerOptions{}) + require.ErrorIs(t, err, errMissingSigner) + _, err = c.PutContainer(ctx, cnr, neofsecdsa.Signer(neofscryptotest.ECDSAPrivateKey()), PutContainerOptions{}) + require.EqualError(t, err, "wrong signature scheme: ECDSA_SHA512 instead of ECDSA_RFC6979_SHA256") + }) + t.Run("OK", func(t *testing.T) { + srv := srv + assertErr := func(err error) { require.NoError(t, err) } + c, handlerCalled := dial(t, &srv, assertErr) + res, err := c.PutContainer(ctx, cnr, srv.creatorSigner, PutContainerOptions{}) + assertErr(err) + require.Equal(t, srv.id, res) + require.True(t, *handlerCalled) + t.Run("with session", func(t *testing.T) { + srv := srv + sc := sessiontest.Container() + srv.session = &sc + assertErr := func(err error) { require.NoError(t, err) } + c, handlerCalled := dial(t, &srv, assertErr) + var opts PutContainerOptions + opts.WithinSession(sc) + res, err := c.PutContainer(ctx, cnr, srv.creatorSigner, opts) + assertErr(err) + require.Equal(t, srv.id, res) + require.True(t, *handlerCalled) + }) + }) + t.Run("fail", func(t *testing.T) { + t.Run("sign container", func(t *testing.T) { + srv := srv + srv.sleepDur = 0 + assertErr := func(err error) { require.ErrorContains(t, err, "sign container") } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.PutContainer(ctx, cnr, neofscryptotest.FailSigner(srv.creatorSigner), PutContainerOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("sign request", func(t *testing.T) { + srv := srv + srv.sleepDur = 0 + assertErr := func(err error) { require.ErrorContains(t, err, errSignRequest) } + c, handlerCalled := dial(t, &srv, 
assertErr) + _, err := c.PutContainer(ctx, cnr, newDisposableSigner(srv.creatorSigner), PutContainerOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("transport", func(t *testing.T) { + srv := srv + srv.errTransport = errors.New("any transport failure") + assertErr := func(err error) { + require.ErrorContains(t, err, errTransport) + require.ErrorContains(t, err, "any transport failure") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.PutContainer(ctx, cnr, srv.creatorSigner, PutContainerOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("invalid response signature", func(t *testing.T) { + for i, testCase := range []struct { + err string + corrupt func(*apicontainer.PutResponse) + }{ + {err: "missing verification header", + corrupt: func(r *apicontainer.PutResponse) { r.VerifyHeader = nil }, + }, + {err: "missing body signature", + corrupt: func(r *apicontainer.PutResponse) { r.VerifyHeader.BodySignature = nil }, + }, + {err: "missing signature of the meta header", + corrupt: func(r *apicontainer.PutResponse) { r.VerifyHeader.MetaSignature = nil }, + }, + {err: "missing signature of the origin verification header", + corrupt: func(r *apicontainer.PutResponse) { r.VerifyHeader.OriginSignature = nil }, + }, + {err: "verify body signature: missing public key", + corrupt: func(r *apicontainer.PutResponse) { r.VerifyHeader.BodySignature.Key = nil }, + }, + {err: "verify signature of the meta header: missing public key", + corrupt: func(r *apicontainer.PutResponse) { r.VerifyHeader.MetaSignature.Key = nil }, + }, + {err: "verify signature of the origin verification header: missing public key", + corrupt: func(r *apicontainer.PutResponse) { r.VerifyHeader.OriginSignature.Key = nil }, + }, + {err: "verify body signature: decode public key from binary", + corrupt: func(r *apicontainer.PutResponse) { + r.VerifyHeader.BodySignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the meta 
header: decode public key from binary", + corrupt: func(r *apicontainer.PutResponse) { + r.VerifyHeader.MetaSignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the origin verification header: decode public key from binary", + corrupt: func(r *apicontainer.PutResponse) { + r.VerifyHeader.OriginSignature.Key = []byte("not a public key") + }, + }, + {err: "verify body signature: invalid scheme -1", + corrupt: func(r *apicontainer.PutResponse) { r.VerifyHeader.BodySignature.Scheme = -1 }, + }, + {err: "verify body signature: unsupported scheme 3", + corrupt: func(r *apicontainer.PutResponse) { r.VerifyHeader.BodySignature.Scheme = 3 }, }, + {err: "verify signature of the meta header: unsupported scheme 3", + corrupt: func(r *apicontainer.PutResponse) { r.VerifyHeader.MetaSignature.Scheme = 3 }, + }, + {err: "verify signature of the origin verification header: unsupported scheme 3", + corrupt: func(r *apicontainer.PutResponse) { r.VerifyHeader.OriginSignature.Scheme = 3 }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apicontainer.PutResponse) { r.VerifyHeader.BodySignature.Sign[0]++ }, + }, + {err: "verify signature of the meta header: signature mismatch", + corrupt: func(r *apicontainer.PutResponse) { r.VerifyHeader.MetaSignature.Sign[0]++ }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apicontainer.PutResponse) { r.VerifyHeader.OriginSignature.Sign[0]++ }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apicontainer.PutResponse) { + r.VerifyHeader.BodySignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the meta header: signature mismatch", + corrupt: func(r *apicontainer.PutResponse) { + r.VerifyHeader.MetaSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the origin 
verification header: signature mismatch", + corrupt: func(r *apicontainer.PutResponse) { + r.VerifyHeader.OriginSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + } { + srv := srv + srv.corruptRespSig = testCase.corrupt + assertErr := func(err error) { + require.ErrorContains(t, err, errResponseSignature, [2]any{i, testCase}) + require.ErrorContains(t, err, testCase.err, [2]any{i, testCase}) + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.PutContainer(ctx, cnr, srv.creatorSigner, PutContainerOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + } + }) + t.Run("invalid response status", func(t *testing.T) { + srv := srv + srv.modifyResp = func(r *apicontainer.PutResponse) { + r.MetaHeader.Status = &status.Status{Code: status.InternalServerError, Details: make([]*status.Status_Detail, 1)} + } + assertErr := func(err error) { + require.ErrorContains(t, err, errInvalidResponseStatus) + require.ErrorContains(t, err, "details attached but not supported") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.PutContainer(ctx, cnr, srv.creatorSigner, PutContainerOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("status errors", func(t *testing.T) { + for _, testCase := range []struct { + code uint32 + errConst error + errVar any + }{ + {code: 1 << 32 / 2}, + {code: status.InternalServerError, errConst: apistatus.ErrServerInternal, errVar: new(apistatus.InternalServerError)}, + {code: status.SignatureVerificationFail, errConst: apistatus.ErrSignatureVerification, errVar: new(apistatus.SignatureVerificationFailure)}, + } { + srv := srv + srv.modifyResp = func(r *apicontainer.PutResponse) { + r.MetaHeader.Status = &status.Status{Code: testCase.code, Message: "any message"} + } + assertErr := func(err error) { + require.ErrorIs(t, err, apistatus.Error, testCase) + require.ErrorContains(t, err, "any message", testCase) + if testCase.errConst != nil { + require.ErrorIs(t, 
err, testCase.errConst, testCase) + } + if testCase.errVar != nil { + require.ErrorAs(t, err, testCase.errVar, testCase) + } + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.PutContainer(ctx, cnr, srv.creatorSigner, PutContainerOptions{}) + assertErr(err) + require.True(t, *handlerCalled, testCase) + } + }) + t.Run("response body", func(t *testing.T) { + t.Run("missing", func(t *testing.T) { + srv := srv + assertErr := func(err error) { require.EqualError(t, err, "invalid response: missing body") } + c, handlerCalled := dial(t, &srv, assertErr) + srv.modifyResp = func(r *apicontainer.PutResponse) { r.Body = nil } + _, err := c.PutContainer(ctx, cnr, srv.creatorSigner, PutContainerOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("missing container ID", func(t *testing.T) { + srv := srv + srv.modifyResp = func(r *apicontainer.PutResponse) { r.Body.ContainerId = nil } + assertErr := func(err error) { + require.EqualError(t, err, "invalid response: invalid body: missing required field (ID)") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.PutContainer(ctx, cnr, srv.creatorSigner, PutContainerOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("invalid container ID", func(t *testing.T) { + srv := srv + srv.modifyResp = func(r *apicontainer.PutResponse) { r.Body.ContainerId.Value = make([]byte, 31) } + assertErr := func(err error) { + require.EqualError(t, err, "invalid response: invalid body: invalid field (ID): invalid value length 31") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.PutContainer(ctx, cnr, srv.creatorSigner, PutContainerOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + }) + }) + t.Run("response info handler", func(t *testing.T) { + t.Run("OK", func(t *testing.T) { + srv := srv + srv.epoch = 3598503 + assertErr := func(err error) { require.NoError(t, err) } + respHandlerCalled := false + c, reqHandlerCalled := _dial(t, &srv, assertErr, 
func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + respHandlerCalled = true + require.EqualValues(t, 3598503, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return nil + }) + }) + _, err := c.PutContainer(ctx, cnr, srv.creatorSigner, PutContainerOptions{}) + assertErr(err) + require.True(t, respHandlerCalled) + require.True(t, *reqHandlerCalled) + }) + t.Run("fail", func(t *testing.T) { + srv := srv + srv.epoch = 4386380643 + assertErr := func(err error) { require.EqualError(t, err, "intercept response info: some handler error") } + respHandlerCalled := false + c, reqHandlerCalled := _dial(t, &srv, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + if !respHandlerCalled { // dial + respHandlerCalled = true + return nil + } + require.EqualValues(t, 4386380643, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return errors.New("some handler error") + }) + }) + _, err := c.PutContainer(ctx, cnr, srv.creatorSigner, PutContainerOptions{}) + assertErr(err) + require.True(t, respHandlerCalled) + require.True(t, *reqHandlerCalled) + }) + }) +} + +type getContainerServer struct { + noOtherContainerCalls + // client + id cid.ID + clientSigScheme neofscrypto.Scheme + clientPubKey []byte + // server + sleepDur time.Duration + endpointInfoOnDialServer + container container.Container + errTransport error + modifyResp func(*apicontainer.GetResponse) + corruptRespSig func(*apicontainer.GetResponse) +} + +func (x getContainerServer) Get(ctx context.Context, req *apicontainer.GetRequest) (*apicontainer.GetResponse, error) { + if x.sleepDur > 0 { + time.Sleep(x.sleepDur) + } + if x.errTransport != nil { + return nil, x.errTransport + } + var sts status.Status + resp := apicontainer.GetResponse{ + MetaHeader: &apisession.ResponseMetaHeader{Status: &sts, Epoch: 
x.epoch}, + } + var err error + var id cid.ID + if ctx == nil { + sts.Code, sts.Message = status.InternalServerError, "nil context" + } else if req == nil { + sts.Code, sts.Message = status.InternalServerError, "nil request" + } else if err = neofscrypto.VerifyRequest(req, req.Body); err != nil { + sts.Code, sts.Message = status.SignatureVerificationFail, err.Error() + } else if req.VerifyHeader.BodySignature.Scheme != refs.SignatureScheme(x.clientSigScheme) || + !bytes.Equal(req.VerifyHeader.BodySignature.Key, x.clientPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request body signature credentials" + } else if req.VerifyHeader.MetaSignature.Scheme != refs.SignatureScheme(x.clientSigScheme) || + !bytes.Equal(req.VerifyHeader.MetaSignature.Key, x.clientPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request meta header signature credentials" + } else if req.VerifyHeader.OriginSignature.Scheme != refs.SignatureScheme(x.clientSigScheme) || + !bytes.Equal(req.VerifyHeader.OriginSignature.Key, x.clientPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected origin request verification header signature credentials" + } else if req.MetaHeader != nil { + sts.Code, sts.Message = status.InternalServerError, "meta header is set" + } else if req.Body == nil { + sts.Code, sts.Message = status.InternalServerError, "missing request body" + } else if req.Body.ContainerId == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request body: missing ID" + } else if err = id.ReadFromV2(req.Body.ContainerId); err != nil { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request body: invalid ID field: %s", err) + } else if id != x.id { + sts.Code, sts.Message = status.InternalServerError, "[test] wrong ID" + } else { + resp.MetaHeader.Status = nil + resp.Body = &apicontainer.GetResponse_Body{ + Container: new(apicontainer.Container), + Signature: 
&refs.SignatureRFC6979{ + Key: []byte("any_public_key"), + Sign: []byte("any_signature"), }, - { - "delete", - func() error { - return c.ContainerDelete(ctx, cid.ID{}, nil, PrmContainerDelete{}) + SessionToken: &apisession.SessionToken{ + Body: &apisession.SessionToken_Body{ + Id: []byte("any_ID"), + OwnerId: &refs.OwnerID{Value: []byte("any_owner")}, + Lifetime: &apisession.SessionToken_Body_TokenLifetime{ + Exp: rand.Uint64(), + Nbf: rand.Uint64(), + Iat: rand.Uint64(), + }, + SessionKey: []byte("any_session_key"), + Context: &apisession.SessionToken_Body_Object{ + Object: &apisession.ObjectSessionContext{ + Verb: apisession.ObjectSessionContext_Verb(rand.Int31()), + Target: &apisession.ObjectSessionContext_Target{ + Container: &refs.ContainerID{Value: []byte("any_container")}, + Objects: []*refs.ObjectID{{Value: []byte("any_object1")}, {Value: []byte("any_object2")}}, + }, + }, + }, + }, + Signature: &refs.Signature{ + Key: []byte("any_public_key"), + Sign: []byte("any_signature"), + Scheme: refs.SignatureScheme(rand.Int31()), }, }, - { - "set_eacl", - func() error { - return c.ContainerSetEACL(ctx, eacl.Table{}, nil, PrmContainerSetEACL{}) + } + x.container.WriteToV2(resp.Body.Container) + } + if x.modifyResp != nil { + x.modifyResp(&resp) + } + resp.VerifyHeader, err = neofscrypto.SignResponse(x.serverSigner, &resp, resp.Body, nil) + if err != nil { + return nil, fmt.Errorf("sign response: %w", err) + } + if x.corruptRespSig != nil { + x.corruptRespSig(&resp) + } + return &resp, nil +} + +func TestClient_GetContainer(t *testing.T) { + ctx := context.Background() + var srv getContainerServer + srv.sleepDur = 10 * time.Millisecond + srv.serverSigner = neofscryptotest.RandomSigner() + srv.latestVersion = versiontest.Version() + srv.nodeInfo = netmaptest.NodeInfo() + srv.nodeInfo.SetPublicKey(neofscrypto.PublicKeyBytes(srv.serverSigner.Public())) + srv.id = cidtest.ID() + srv.container = containertest.Container() + _dial := func(t testing.TB, srv 
*getContainerServer, assertErr func(error), customizeOpts func(*Options)) (*Client, *bool) { + var opts Options + var handlerCalled bool + opts.SetAPIRequestResultHandler(func(nodeKey []byte, endpoint string, op stat.Method, dur time.Duration, err error) { + handlerCalled = true + require.Equal(t, srv.nodeInfo.PublicKey(), nodeKey) + require.Equal(t, "localhost:8080", endpoint) + require.Equal(t, stat.MethodContainerGet, op) + require.Greater(t, dur, srv.sleepDur) + assertErr(err) + }) + if customizeOpts != nil { + customizeOpts(&opts) + } + + c, err := New(anyValidURI, opts) + require.NoError(t, err) + srv.clientSigScheme = c.signer.Scheme() + srv.clientPubKey = neofscrypto.PublicKeyBytes(c.signer.Public()) + + conn := bufconn.Listen(10 << 10) + gs := grpc.NewServer() + apinetmap.RegisterNetmapServiceServer(gs, srv) + apicontainer.RegisterContainerServiceServer(gs, srv) + go func() { _ = gs.Serve(conn) }() + t.Cleanup(gs.Stop) + + c.dial = func(ctx context.Context, _ string) (net.Conn, error) { return conn.DialContext(ctx) } + require.NoError(t, c.Dial(ctx)) + + return c, &handlerCalled + } + dial := func(t testing.TB, srv *getContainerServer, assertErr func(error)) (*Client, *bool) { + return _dial(t, srv, assertErr, nil) + } + t.Run("OK", func(t *testing.T) { + srv := srv + assertErr := func(err error) { require.NoError(t, err) } + c, handlerCalled := dial(t, &srv, assertErr) + res, err := c.GetContainer(ctx, srv.id, GetContainerOptions{}) + assertErr(err) + if !assert.ObjectsAreEqual(srv.container, res) { + // can be caused by gRPC service fields, binaries must still be equal + require.Equal(t, srv.container.Marshal(), res.Marshal()) + } + require.True(t, *handlerCalled) + }) + t.Run("fail", func(t *testing.T) { + t.Run("sign request", func(t *testing.T) { + srv := srv + srv.sleepDur = 0 + assertErr := func(err error) { require.ErrorContains(t, err, errSignRequest) } + c, handlerCalled := dial(t, &srv, assertErr) + c.signer = 
neofscryptotest.FailSigner(c.signer) + _, err := c.GetContainer(ctx, srv.id, GetContainerOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("transport", func(t *testing.T) { + srv := srv + srv.errTransport = errors.New("any transport failure") + assertErr := func(err error) { + require.ErrorContains(t, err, errTransport) + require.ErrorContains(t, err, "any transport failure") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetContainer(ctx, srv.id, GetContainerOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("invalid response signature", func(t *testing.T) { + for i, testCase := range []struct { + err string + corrupt func(*apicontainer.GetResponse) + }{ + {err: "missing verification header", + corrupt: func(r *apicontainer.GetResponse) { r.VerifyHeader = nil }, + }, + {err: "missing body signature", + corrupt: func(r *apicontainer.GetResponse) { r.VerifyHeader.BodySignature = nil }, + }, + {err: "missing signature of the meta header", + corrupt: func(r *apicontainer.GetResponse) { r.VerifyHeader.MetaSignature = nil }, + }, + {err: "missing signature of the origin verification header", + corrupt: func(r *apicontainer.GetResponse) { r.VerifyHeader.OriginSignature = nil }, + }, + {err: "verify body signature: missing public key", + corrupt: func(r *apicontainer.GetResponse) { r.VerifyHeader.BodySignature.Key = nil }, + }, + {err: "verify signature of the meta header: missing public key", + corrupt: func(r *apicontainer.GetResponse) { r.VerifyHeader.MetaSignature.Key = nil }, + }, + {err: "verify signature of the origin verification header: missing public key", + corrupt: func(r *apicontainer.GetResponse) { r.VerifyHeader.OriginSignature.Key = nil }, + }, + {err: "verify body signature: decode public key from binary", + corrupt: func(r *apicontainer.GetResponse) { + r.VerifyHeader.BodySignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the meta header: decode public key 
from binary", + corrupt: func(r *apicontainer.GetResponse) { + r.VerifyHeader.MetaSignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the origin verification header: decode public key from binary", + corrupt: func(r *apicontainer.GetResponse) { + r.VerifyHeader.OriginSignature.Key = []byte("not a public key") + }, + }, + {err: "verify body signature: invalid scheme -1", + corrupt: func(r *apicontainer.GetResponse) { r.VerifyHeader.BodySignature.Scheme = -1 }, + }, + {err: "verify body signature: unsupported scheme 3", + corrupt: func(r *apicontainer.GetResponse) { r.VerifyHeader.BodySignature.Scheme = 3 }, + }, + {err: "verify signature of the meta header: unsupported scheme 3", + corrupt: func(r *apicontainer.GetResponse) { r.VerifyHeader.MetaSignature.Scheme = 3 }, + }, + {err: "verify signature of the origin verification header: unsupported scheme 3", + corrupt: func(r *apicontainer.GetResponse) { r.VerifyHeader.OriginSignature.Scheme = 3 }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apicontainer.GetResponse) { r.VerifyHeader.BodySignature.Sign[0]++ }, + }, + {err: "verify signature of the meta header: signature mismatch", + corrupt: func(r *apicontainer.GetResponse) { r.VerifyHeader.MetaSignature.Sign[0]++ }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apicontainer.GetResponse) { r.VerifyHeader.OriginSignature.Sign[0]++ }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apicontainer.GetResponse) { + r.VerifyHeader.BodySignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the meta header: signature mismatch", + corrupt: func(r *apicontainer.GetResponse) { + r.VerifyHeader.MetaSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the origin verification header: signature 
mismatch", + corrupt: func(r *apicontainer.GetResponse) { + r.VerifyHeader.OriginSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + } { + srv := srv + srv.corruptRespSig = testCase.corrupt + assertErr := func(err error) { + require.ErrorContains(t, err, errResponseSignature, [2]any{i, testCase}) + require.ErrorContains(t, err, testCase.err, [2]any{i, testCase}) + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetContainer(ctx, srv.id, GetContainerOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + } + }) + t.Run("invalid response status", func(t *testing.T) { + srv := srv + srv.modifyResp = func(r *apicontainer.GetResponse) { + r.MetaHeader.Status = &status.Status{Code: status.InternalServerError, Details: make([]*status.Status_Detail, 1)} + } + assertErr := func(err error) { + require.ErrorContains(t, err, errInvalidResponseStatus) + require.ErrorContains(t, err, "details attached but not supported") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetContainer(ctx, srv.id, GetContainerOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("status errors", func(t *testing.T) { + for _, testCase := range []struct { + code uint32 + errConst error + errVar any + }{ + {code: 1 << 32 / 2}, + {code: status.InternalServerError, errConst: apistatus.ErrServerInternal, errVar: new(apistatus.InternalServerError)}, + {code: status.SignatureVerificationFail, errConst: apistatus.ErrSignatureVerification, errVar: new(apistatus.SignatureVerificationFailure)}, + {code: status.ContainerNotFound, errConst: apistatus.ErrContainerNotFound, errVar: new(apistatus.ContainerNotFound)}, + } { + srv := srv + srv.modifyResp = func(r *apicontainer.GetResponse) { + r.MetaHeader.Status = &status.Status{Code: testCase.code, Message: "any message"} + } + assertErr := func(err error) { + require.ErrorIs(t, err, apistatus.Error, testCase) + require.ErrorContains(t, err, "any message", 
testCase) + if testCase.errConst != nil { + require.ErrorIs(t, err, testCase.errConst, testCase) + } + if testCase.errVar != nil { + require.ErrorAs(t, err, testCase.errVar, testCase) + } + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetContainer(ctx, srv.id, GetContainerOptions{}) + assertErr(err) + require.True(t, *handlerCalled, testCase) + } + }) + t.Run("response body", func(t *testing.T) { + t.Run("missing", func(t *testing.T) { + srv := srv + assertErr := func(err error) { require.EqualError(t, err, "invalid response: missing body") } + c, handlerCalled := dial(t, &srv, assertErr) + srv.modifyResp = func(r *apicontainer.GetResponse) { r.Body = nil } + _, err := c.GetContainer(ctx, srv.id, GetContainerOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("missing container", func(t *testing.T) { + srv := srv + srv.modifyResp = func(r *apicontainer.GetResponse) { r.Body.Container = nil } + assertErr := func(err error) { + require.EqualError(t, err, "invalid response: invalid body: missing required field (container)") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetContainer(ctx, srv.id, GetContainerOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("invalid container", func(t *testing.T) { + testCases := []struct { + name string + err string + contains bool + corrupt func(*apicontainer.Container) + }{ + {name: "missing version", err: "missing version", corrupt: func(c *apicontainer.Container) { + c.Version = nil + }}, + {name: "missing owner", err: "missing owner", corrupt: func(c *apicontainer.Container) { + c.OwnerId = nil + }}, + {name: "nil nonce", err: "missing nonce", corrupt: func(c *apicontainer.Container) { + c.Nonce = nil + }}, + {name: "empty nonce", err: "missing nonce", corrupt: func(c *apicontainer.Container) { + c.Nonce = []byte{} + }}, + {name: "missing policy", err: "missing placement policy", corrupt: func(c *apicontainer.Container) { + c.PlacementPolicy = nil 
+ }}, + {name: "owner/nil value", err: "invalid owner: missing value field", corrupt: func(c *apicontainer.Container) { + c.OwnerId.Value = nil + }}, + {name: "owner/empty value", err: "invalid owner: missing value field", corrupt: func(c *apicontainer.Container) { + c.OwnerId.Value = []byte{} + }}, + {name: "owner/wrong length", err: "invalid owner: invalid value length 24", corrupt: func(c *apicontainer.Container) { + c.OwnerId.Value = make([]byte, 24) + }}, + {name: "owner/wrong prefix", err: "invalid owner: invalid prefix byte 0x34, expected 0x35", corrupt: func(c *apicontainer.Container) { + c.OwnerId.Value[0] = 0x34 + }}, + {name: "owner/checksum mismatch", err: "invalid owner: value checksum mismatch", corrupt: func(c *apicontainer.Container) { + c.OwnerId.Value[24]++ + }}, + {name: "nonce/wrong length", err: "invalid nonce: invalid UUID (got 15 bytes)", corrupt: func(c *apicontainer.Container) { + c.Nonce = make([]byte, 15) + }}, + {name: "nonce/wrong version", err: "invalid nonce: wrong UUID version 3", corrupt: func(c *apicontainer.Container) { + c.Nonce[6] = 3 << 4 + }}, + {name: "nonce/nil replicas", err: "invalid placement policy: missing replicas", corrupt: func(c *apicontainer.Container) { + c.PlacementPolicy.Replicas = nil + }}, + {name: "attributes/empty key", err: "invalid attribute #1: missing key", corrupt: func(c *apicontainer.Container) { + c.Attributes = []*apicontainer.Container_Attribute{ + {Key: "key_valid", Value: "any"}, + {Key: "", Value: "any"}, + } + }}, + {name: "attributes/repeated keys", err: "multiple attributes with key=k2", corrupt: func(c *apicontainer.Container) { + c.Attributes = []*apicontainer.Container_Attribute{ + {Key: "k1", Value: "any"}, + {Key: "k2", Value: "1"}, + {Key: "k3", Value: "any"}, + {Key: "k2", Value: "2"}, + } + }}, + {name: "attributes/empty value", err: "invalid attribute #1 (key2): missing value", corrupt: func(c *apicontainer.Container) { + c.Attributes = []*apicontainer.Container_Attribute{ + {Key: 
"key1", Value: "any"}, + {Key: "key2", Value: ""}, + } + }}, + {name: "attributes/invalid timestamp", err: "invalid timestamp attribute (#1): invalid integer", contains: true, corrupt: func(c *apicontainer.Container) { + c.Attributes = []*apicontainer.Container_Attribute{ + {Key: "key1", Value: "any"}, + {Key: "Timestamp", Value: "not_a_number"}, + } + }}, + } + for i := range testCases { + srv := srv + srv.modifyResp = func(r *apicontainer.GetResponse) { + testCases[i].corrupt(r.Body.Container) + } + assertErr := func(err error) { + if testCases[i].contains { + require.ErrorContains(t, err, fmt.Sprintf("invalid response: invalid body: invalid field (container): %s", testCases[i].err)) + } else { + require.EqualError(t, err, fmt.Sprintf("invalid response: invalid body: invalid field (container): %s", testCases[i].err)) + } + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetContainer(ctx, srv.id, GetContainerOptions{}) + assertErr(err) + require.True(t, *handlerCalled, testCases[i].name) + } + }) + }) + }) + t.Run("response info handler", func(t *testing.T) { + t.Run("OK", func(t *testing.T) { + srv := srv + srv.epoch = 3598503 + assertErr := func(err error) { require.NoError(t, err) } + respHandlerCalled := false + c, reqHandlerCalled := _dial(t, &srv, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + respHandlerCalled = true + require.EqualValues(t, 3598503, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return nil + }) + }) + _, err := c.GetContainer(ctx, srv.id, GetContainerOptions{}) + assertErr(err) + require.True(t, respHandlerCalled) + require.True(t, *reqHandlerCalled) + }) + t.Run("fail", func(t *testing.T) { + srv := srv + srv.epoch = 4386380643 + assertErr := func(err error) { require.ErrorContains(t, err, "intercept response info: some handler error") } + respHandlerCalled := false + c, reqHandlerCalled := _dial(t, 
&srv, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + if !respHandlerCalled { // dial + respHandlerCalled = true + return nil + } + require.EqualValues(t, 4386380643, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return errors.New("some handler error") + }) + }) + _, err := c.GetContainer(ctx, srv.id, GetContainerOptions{}) + assertErr(err) + require.True(t, respHandlerCalled) + require.True(t, *reqHandlerCalled) + }) + }) +} + +type listContainersServer struct { + noOtherContainerCalls + // client + usr user.ID + clientSigScheme neofscrypto.Scheme + clientPubKey []byte + // server + sleepDur time.Duration + endpointInfoOnDialServer + containers []cid.ID + errTransport error + modifyResp func(*apicontainer.ListResponse) + corruptRespSig func(*apicontainer.ListResponse) +} + +func (x listContainersServer) List(ctx context.Context, req *apicontainer.ListRequest) (*apicontainer.ListResponse, error) { + if x.sleepDur > 0 { + time.Sleep(x.sleepDur) + } + if x.errTransport != nil { + return nil, x.errTransport + } + var sts status.Status + resp := apicontainer.ListResponse{ + MetaHeader: &apisession.ResponseMetaHeader{Status: &sts, Epoch: x.epoch}, + } + var err error + var usr user.ID + if ctx == nil { + sts.Code, sts.Message = status.InternalServerError, "nil context" + } else if req == nil { + sts.Code, sts.Message = status.InternalServerError, "nil request" + } else if err = neofscrypto.VerifyRequest(req, req.Body); err != nil { + sts.Code, sts.Message = status.SignatureVerificationFail, err.Error() + } else if req.VerifyHeader.BodySignature.Scheme != refs.SignatureScheme(x.clientSigScheme) || + !bytes.Equal(req.VerifyHeader.BodySignature.Key, x.clientPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request body signature credentials" + } else if req.VerifyHeader.MetaSignature.Scheme != 
refs.SignatureScheme(x.clientSigScheme) || + !bytes.Equal(req.VerifyHeader.MetaSignature.Key, x.clientPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request meta header signature credentials" + } else if req.VerifyHeader.OriginSignature.Scheme != refs.SignatureScheme(x.clientSigScheme) || + !bytes.Equal(req.VerifyHeader.OriginSignature.Key, x.clientPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected origin request verification header signature credentials" + } else if req.MetaHeader != nil { + sts.Code, sts.Message = status.InternalServerError, "meta header is set" + } else if req.Body == nil { + sts.Code, sts.Message = status.InternalServerError, "missing request body" + } else if req.Body.OwnerId == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request body: missing user" + } else if err = usr.ReadFromV2(req.Body.OwnerId); err != nil { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request body: invalid user field: %s", err) + } else if usr != x.usr { + sts.Code, sts.Message = status.InternalServerError, "[test] wrong user" + } else { + resp.MetaHeader.Status = nil + if len(x.containers) > 0 { + resp.Body = &apicontainer.ListResponse_Body{ContainerIds: make([]*refs.ContainerID, len(x.containers))} + for i := range x.containers { + resp.Body.ContainerIds[i] = new(refs.ContainerID) + x.containers[i].WriteToV2(resp.Body.ContainerIds[i]) + } + } + } + if x.modifyResp != nil { + x.modifyResp(&resp) + } + resp.VerifyHeader, err = neofscrypto.SignResponse(x.serverSigner, &resp, resp.Body, nil) + if err != nil { + return nil, fmt.Errorf("sign response: %w", err) + } + if x.corruptRespSig != nil { + x.corruptRespSig(&resp) + } + return &resp, nil +} + +func TestClient_ListContainers(t *testing.T) { + ctx := context.Background() + var srv listContainersServer + srv.sleepDur = 10 * time.Millisecond + srv.serverSigner = neofscryptotest.RandomSigner() + 
srv.latestVersion = versiontest.Version() + srv.nodeInfo = netmaptest.NodeInfo() + srv.nodeInfo.SetPublicKey(neofscrypto.PublicKeyBytes(srv.serverSigner.Public())) + srv.usr = usertest.ID() + srv.containers = cidtest.NIDs(5) + _dial := func(t testing.TB, srv *listContainersServer, assertErr func(error), customizeOpts func(*Options)) (*Client, *bool) { + var opts Options + var handlerCalled bool + opts.SetAPIRequestResultHandler(func(nodeKey []byte, endpoint string, op stat.Method, dur time.Duration, err error) { + handlerCalled = true + require.Equal(t, srv.nodeInfo.PublicKey(), nodeKey) + require.Equal(t, "localhost:8080", endpoint) + require.Equal(t, stat.MethodContainerList, op) + require.Greater(t, dur, srv.sleepDur) + assertErr(err) + }) + if customizeOpts != nil { + customizeOpts(&opts) + } + + c, err := New(anyValidURI, opts) + require.NoError(t, err) + srv.clientSigScheme = c.signer.Scheme() + srv.clientPubKey = neofscrypto.PublicKeyBytes(c.signer.Public()) + + conn := bufconn.Listen(10 << 10) + gs := grpc.NewServer() + apinetmap.RegisterNetmapServiceServer(gs, srv) + apicontainer.RegisterContainerServiceServer(gs, srv) + go func() { _ = gs.Serve(conn) }() + t.Cleanup(gs.Stop) + + c.dial = func(ctx context.Context, _ string) (net.Conn, error) { return conn.DialContext(ctx) } + require.NoError(t, c.Dial(ctx)) + + return c, &handlerCalled + } + dial := func(t testing.TB, srv *listContainersServer, assertErr func(error)) (*Client, *bool) { + return _dial(t, srv, assertErr, nil) + } + t.Run("OK", func(t *testing.T) { + srv := srv + assertErr := func(err error) { require.NoError(t, err) } + c, handlerCalled := dial(t, &srv, assertErr) + res, err := c.ListContainers(ctx, srv.usr, ListContainersOptions{}) + assertErr(err) + require.Equal(t, srv.containers, res) + require.True(t, *handlerCalled) + + srv.containers = nil + res, err = c.ListContainers(ctx, srv.usr, ListContainersOptions{}) + assertErr(err) + require.Empty(t, res) + }) + t.Run("fail", func(t 
*testing.T) { + t.Run("sign request", func(t *testing.T) { + srv := srv + srv.sleepDur = 0 + assertErr := func(err error) { require.ErrorContains(t, err, errSignRequest) } + c, handlerCalled := dial(t, &srv, assertErr) + c.signer = neofscryptotest.FailSigner(c.signer) + _, err := c.ListContainers(ctx, srv.usr, ListContainersOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("transport", func(t *testing.T) { + srv := srv + srv.errTransport = errors.New("any transport failure") + assertErr := func(err error) { + require.ErrorContains(t, err, errTransport) + require.ErrorContains(t, err, "any transport failure") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.ListContainers(ctx, srv.usr, ListContainersOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("invalid response signature", func(t *testing.T) { + for i, testCase := range []struct { + err string + corrupt func(*apicontainer.ListResponse) + }{ + {err: "missing verification header", + corrupt: func(r *apicontainer.ListResponse) { r.VerifyHeader = nil }, + }, + {err: "missing body signature", + corrupt: func(r *apicontainer.ListResponse) { r.VerifyHeader.BodySignature = nil }, + }, + {err: "missing signature of the meta header", + corrupt: func(r *apicontainer.ListResponse) { r.VerifyHeader.MetaSignature = nil }, + }, + {err: "missing signature of the origin verification header", + corrupt: func(r *apicontainer.ListResponse) { r.VerifyHeader.OriginSignature = nil }, + }, + {err: "verify body signature: missing public key", + corrupt: func(r *apicontainer.ListResponse) { r.VerifyHeader.BodySignature.Key = nil }, + }, + {err: "verify signature of the meta header: missing public key", + corrupt: func(r *apicontainer.ListResponse) { r.VerifyHeader.MetaSignature.Key = nil }, + }, + {err: "verify signature of the origin verification header: missing public key", + corrupt: func(r *apicontainer.ListResponse) { r.VerifyHeader.OriginSignature.Key = nil }, + }, 
+ {err: "verify body signature: decode public key from binary", + corrupt: func(r *apicontainer.ListResponse) { + r.VerifyHeader.BodySignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the meta header: decode public key from binary", + corrupt: func(r *apicontainer.ListResponse) { + r.VerifyHeader.MetaSignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the origin verification header: decode public key from binary", + corrupt: func(r *apicontainer.ListResponse) { + r.VerifyHeader.OriginSignature.Key = []byte("not a public key") + }, + }, + {err: "verify body signature: invalid scheme -1", + corrupt: func(r *apicontainer.ListResponse) { r.VerifyHeader.BodySignature.Scheme = -1 }, + }, + {err: "verify body signature: unsupported scheme 3", + corrupt: func(r *apicontainer.ListResponse) { r.VerifyHeader.BodySignature.Scheme = 3 }, + }, + {err: "verify signature of the meta header: unsupported scheme 3", + corrupt: func(r *apicontainer.ListResponse) { r.VerifyHeader.MetaSignature.Scheme = 3 }, + }, + {err: "verify signature of the origin verification header: unsupported scheme 3", + corrupt: func(r *apicontainer.ListResponse) { r.VerifyHeader.OriginSignature.Scheme = 3 }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apicontainer.ListResponse) { r.VerifyHeader.BodySignature.Sign[0]++ }, + }, + {err: "verify signature of the meta header: signature mismatch", + corrupt: func(r *apicontainer.ListResponse) { r.VerifyHeader.MetaSignature.Sign[0]++ }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apicontainer.ListResponse) { r.VerifyHeader.OriginSignature.Sign[0]++ }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apicontainer.ListResponse) { + r.VerifyHeader.BodySignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the meta 
header: signature mismatch", + corrupt: func(r *apicontainer.ListResponse) { + r.VerifyHeader.MetaSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apicontainer.ListResponse) { + r.VerifyHeader.OriginSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + } { + srv := srv + srv.corruptRespSig = testCase.corrupt + assertErr := func(err error) { + require.ErrorContains(t, err, errResponseSignature, [2]any{i, testCase}) + require.ErrorContains(t, err, testCase.err, [2]any{i, testCase}) + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.ListContainers(ctx, srv.usr, ListContainersOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + } + }) + t.Run("invalid response status", func(t *testing.T) { + srv := srv + srv.modifyResp = func(r *apicontainer.ListResponse) { + r.MetaHeader.Status = &status.Status{Code: status.InternalServerError, Details: make([]*status.Status_Detail, 1)} + } + assertErr := func(err error) { + require.ErrorContains(t, err, errInvalidResponseStatus) + require.ErrorContains(t, err, "details attached but not supported") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.ListContainers(ctx, srv.usr, ListContainersOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("status errors", func(t *testing.T) { + for _, testCase := range []struct { + code uint32 + errConst error + errVar any + }{ + {code: 1 << 32 / 2}, + {code: status.InternalServerError, errConst: apistatus.ErrServerInternal, errVar: new(apistatus.InternalServerError)}, + {code: status.SignatureVerificationFail, errConst: apistatus.ErrSignatureVerification, errVar: new(apistatus.SignatureVerificationFailure)}, + } { + srv := srv + srv.modifyResp = func(r *apicontainer.ListResponse) { + r.MetaHeader.Status = &status.Status{Code: testCase.code, Message: 
"any message"} + } + assertErr := func(err error) { + require.ErrorIs(t, err, apistatus.Error, testCase) + require.ErrorContains(t, err, "any message", testCase) + if testCase.errConst != nil { + require.ErrorIs(t, err, testCase.errConst, testCase) + } + if testCase.errVar != nil { + require.ErrorAs(t, err, testCase.errVar, testCase) + } + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.ListContainers(ctx, srv.usr, ListContainersOptions{}) + assertErr(err) + require.True(t, *handlerCalled, testCase) + } + }) + t.Run("response body", func(t *testing.T) { + t.Run("invalid IDs", func(t *testing.T) { + testCases := []struct { + name string + err string + corrupt func([]*refs.ContainerID) + }{ + // nil-ness is "lost" on gRPC transmission: element is decoded as zero structure, + // therefore we won't reach 'nil element' error but the next one + {name: "nil", err: "invalid element #1: missing value field", corrupt: func(ids []*refs.ContainerID) { + ids[1] = nil + }}, + {name: "wrong length", err: "invalid element #2: invalid value length 31", corrupt: func(ids []*refs.ContainerID) { + ids[2].Value = make([]byte, 31) + }}, + } + for i := range testCases { + srv := srv + srv.modifyResp = func(r *apicontainer.ListResponse) { + testCases[i].corrupt(r.Body.ContainerIds) + } + assertErr := func(err error) { + require.EqualError(t, err, fmt.Sprintf("invalid response: invalid body: invalid field (ID list): %s", testCases[i].err)) + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.ListContainers(ctx, srv.usr, ListContainersOptions{}) + assertErr(err) + require.True(t, *handlerCalled, testCases[i].name) + } + }) + }) + }) + t.Run("response info handler", func(t *testing.T) { + t.Run("OK", func(t *testing.T) { + srv := srv + srv.epoch = 3598503 + assertErr := func(err error) { require.NoError(t, err) } + respHandlerCalled := false + c, reqHandlerCalled := _dial(t, &srv, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info 
ResponseMetaInfo) error { + respHandlerCalled = true + require.EqualValues(t, 3598503, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return nil + }) + }) + _, err := c.ListContainers(ctx, srv.usr, ListContainersOptions{}) + assertErr(err) + require.True(t, respHandlerCalled) + require.True(t, *reqHandlerCalled) + }) + t.Run("fail", func(t *testing.T) { + srv := srv + srv.epoch = 4386380643 + assertErr := func(err error) { require.ErrorContains(t, err, "intercept response info: some handler error") } + respHandlerCalled := false + c, reqHandlerCalled := _dial(t, &srv, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + if !respHandlerCalled { // dial + respHandlerCalled = true + return nil + } + require.EqualValues(t, 4386380643, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return errors.New("some handler error") + }) + }) + _, err := c.ListContainers(ctx, srv.usr, ListContainersOptions{}) + assertErr(err) + require.True(t, respHandlerCalled) + require.True(t, *reqHandlerCalled) + }) + }) +} + +type deleteContainerServer struct { + noOtherContainerCalls + // client + cnr cid.ID + removerSigner neofscrypto.Signer + session *session.Container + // server + sleepDur time.Duration + endpointInfoOnDialServer + errTransport error + modifyResp func(*apicontainer.DeleteResponse) + corruptRespSig func(*apicontainer.DeleteResponse) +} + +func (x deleteContainerServer) Delete(ctx context.Context, req *apicontainer.DeleteRequest) (*apicontainer.DeleteResponse, error) { + if x.sleepDur > 0 { + time.Sleep(x.sleepDur) + } + if x.errTransport != nil { + return nil, x.errTransport + } + var sts status.Status + resp := apicontainer.DeleteResponse{ + MetaHeader: &apisession.ResponseMetaHeader{Status: &sts, Epoch: x.epoch}, + } + var err error + var cnr cid.ID + sigScheme := 
refs.SignatureScheme(x.removerSigner.Scheme()) + creatorPubKey := neofscrypto.PublicKeyBytes(x.removerSigner.Public()) + if ctx == nil { + sts.Code, sts.Message = status.InternalServerError, "nil context" + } else if req == nil { + sts.Code, sts.Message = status.InternalServerError, "nil request" + } else if err = neofscrypto.VerifyRequest(req, req.Body); err != nil { + sts.Code, sts.Message = status.SignatureVerificationFail, err.Error() + } else if req.VerifyHeader.BodySignature.Scheme != sigScheme || + !bytes.Equal(req.VerifyHeader.BodySignature.Key, creatorPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request body signature credentials" + } else if req.VerifyHeader.MetaSignature.Scheme != sigScheme || + !bytes.Equal(req.VerifyHeader.MetaSignature.Key, creatorPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request meta header signature credentials" + } else if req.VerifyHeader.OriginSignature.Scheme != sigScheme || + !bytes.Equal(req.VerifyHeader.OriginSignature.Key, creatorPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected origin request verification header signature credentials" + } else if req.Body == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: missing body" + } else if req.Body.ContainerId == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: invalid body: missing ID" + } else if err = cnr.ReadFromV2(req.Body.ContainerId); err != nil { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request: invalid body: invalid ID field: %s", err) + } else if cnr != x.cnr { + sts.Code, sts.Message = status.InternalServerError, "[test] wrong ID" + } else if req.Body.Signature == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: invalid body: missing ID signature" + } else if !bytes.Equal(req.Body.Signature.Key, creatorPubKey) { + sts.Code, sts.Message = 
status.InternalServerError, "[test] public key in request body differs with the creator's one" + } else if !x.removerSigner.Public().Verify(cnr[:], req.Body.Signature.Sign) { + sts.Code, sts.Message = status.InternalServerError, "[test] wrong ID signature" + } else if x.session != nil { + var sc session.Container + if req.MetaHeader == nil { + sts.Code, sts.Message = status.InternalServerError, "[test] missing request meta header" + } else if req.MetaHeader.SessionToken == nil { + sts.Code, sts.Message = status.InternalServerError, "[test] missing session token" + } else if err = sc.ReadFromV2(req.MetaHeader.SessionToken); err != nil { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request: invalid meta header: invalid session token: %v", err) + } else if !bytes.Equal(sc.Marshal(), x.session.Marshal()) { + sts.Code, sts.Message = status.InternalServerError, "[test] session token in request differs with the input one" + } + } else if req.MetaHeader != nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: meta header is set" + } + if sts.Code == 0 { + resp.MetaHeader.Status = nil + } + if x.modifyResp != nil { + x.modifyResp(&resp) + } + resp.VerifyHeader, err = neofscrypto.SignResponse(x.serverSigner, &resp, resp.Body, nil) + if err != nil { + return nil, fmt.Errorf("sign response: %w", err) + } + if x.corruptRespSig != nil { + x.corruptRespSig(&resp) + } + return &resp, nil +} + +func TestClient_DeleteContainer(t *testing.T) { + ctx := context.Background() + var srv deleteContainerServer + srv.sleepDur = 10 * time.Millisecond + srv.serverSigner = neofscryptotest.RandomSigner() + srv.latestVersion = versiontest.Version() + srv.nodeInfo = netmaptest.NodeInfo() + srv.nodeInfo.SetPublicKey(neofscrypto.PublicKeyBytes(srv.serverSigner.Public())) + srv.cnr = cidtest.ID() + srv.removerSigner = neofscryptotest.RandomSignerRFC6979() + _dial := func(t testing.TB, srv *deleteContainerServer, assertErr func(error), 
customizeOpts func(*Options)) (*Client, *bool) { + var opts Options + var handlerCalled bool + opts.SetAPIRequestResultHandler(func(nodeKey []byte, endpoint string, op stat.Method, dur time.Duration, err error) { + handlerCalled = true + require.Equal(t, srv.nodeInfo.PublicKey(), nodeKey) + require.Equal(t, "localhost:8080", endpoint) + require.Equal(t, stat.MethodContainerDelete, op) + require.Greater(t, dur, srv.sleepDur) + assertErr(err) + }) + if customizeOpts != nil { + customizeOpts(&opts) + } + + c, err := New(anyValidURI, opts) + require.NoError(t, err) + + conn := bufconn.Listen(10 << 10) + gs := grpc.NewServer() + apinetmap.RegisterNetmapServiceServer(gs, srv) + apicontainer.RegisterContainerServiceServer(gs, srv) + go func() { _ = gs.Serve(conn) }() + t.Cleanup(gs.Stop) + + c.dial = func(ctx context.Context, _ string) (net.Conn, error) { return conn.DialContext(ctx) } + require.NoError(t, c.Dial(ctx)) + + return c, &handlerCalled + } + dial := func(t testing.TB, srv *deleteContainerServer, assertErr func(error)) (*Client, *bool) { + return _dial(t, srv, assertErr, nil) + } + t.Run("invalid signer", func(t *testing.T) { + c, err := New(anyValidURI, Options{}) + require.NoError(t, err) + err = c.DeleteContainer(ctx, srv.cnr, nil, DeleteContainerOptions{}) + require.ErrorIs(t, err, errMissingSigner) + err = c.DeleteContainer(ctx, srv.cnr, neofsecdsa.Signer(neofscryptotest.ECDSAPrivateKey()), DeleteContainerOptions{}) + require.EqualError(t, err, "wrong signature scheme: ECDSA_SHA512 instead of ECDSA_RFC6979_SHA256") + }) + t.Run("OK", func(t *testing.T) { + srv := srv + assertErr := func(err error) { require.NoError(t, err) } + c, handlerCalled := dial(t, &srv, assertErr) + err := c.DeleteContainer(ctx, srv.cnr, srv.removerSigner, DeleteContainerOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + t.Run("with session", func(t *testing.T) { + srv := srv + sc := sessiontest.Container() + srv.session = &sc + assertErr := func(err error) { 
require.NoError(t, err) } + c, handlerCalled := dial(t, &srv, assertErr) + var opts DeleteContainerOptions + opts.WithinSession(sc) + err := c.DeleteContainer(ctx, srv.cnr, srv.removerSigner, opts) + assertErr(err) + require.True(t, *handlerCalled) + }) + }) + t.Run("fail", func(t *testing.T) { + t.Run("sign container", func(t *testing.T) { + srv := srv + srv.sleepDur = 0 + assertErr := func(err error) { require.ErrorContains(t, err, "sign container") } + c, handlerCalled := dial(t, &srv, assertErr) + err := c.DeleteContainer(ctx, srv.cnr, neofscryptotest.FailSigner(srv.removerSigner), DeleteContainerOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("sign request", func(t *testing.T) { + srv := srv + srv.sleepDur = 0 + assertErr := func(err error) { require.ErrorContains(t, err, errSignRequest) } + c, handlerCalled := dial(t, &srv, assertErr) + err := c.DeleteContainer(ctx, srv.cnr, newDisposableSigner(srv.removerSigner), DeleteContainerOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("transport", func(t *testing.T) { + srv := srv + srv.errTransport = errors.New("any transport failure") + assertErr := func(err error) { + require.ErrorContains(t, err, errTransport) + require.ErrorContains(t, err, "any transport failure") + } + c, handlerCalled := dial(t, &srv, assertErr) + err := c.DeleteContainer(ctx, srv.cnr, srv.removerSigner, DeleteContainerOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("invalid response signature", func(t *testing.T) { + for i, testCase := range []struct { + err string + corrupt func(*apicontainer.DeleteResponse) + }{ + {err: "missing verification header", + corrupt: func(r *apicontainer.DeleteResponse) { r.VerifyHeader = nil }, + }, + {err: "missing body signature", + corrupt: func(r *apicontainer.DeleteResponse) { r.VerifyHeader.BodySignature = nil }, + }, + {err: "missing signature of the meta header", + corrupt: func(r *apicontainer.DeleteResponse) { 
r.VerifyHeader.MetaSignature = nil }, + }, + {err: "missing signature of the origin verification header", + corrupt: func(r *apicontainer.DeleteResponse) { r.VerifyHeader.OriginSignature = nil }, + }, + {err: "verify body signature: missing public key", + corrupt: func(r *apicontainer.DeleteResponse) { r.VerifyHeader.BodySignature.Key = nil }, + }, + {err: "verify signature of the meta header: missing public key", + corrupt: func(r *apicontainer.DeleteResponse) { r.VerifyHeader.MetaSignature.Key = nil }, + }, + {err: "verify signature of the origin verification header: missing public key", + corrupt: func(r *apicontainer.DeleteResponse) { r.VerifyHeader.OriginSignature.Key = nil }, + }, + {err: "verify body signature: decode public key from binary", + corrupt: func(r *apicontainer.DeleteResponse) { + r.VerifyHeader.BodySignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the meta header: decode public key from binary", + corrupt: func(r *apicontainer.DeleteResponse) { + r.VerifyHeader.MetaSignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the origin verification header: decode public key from binary", + corrupt: func(r *apicontainer.DeleteResponse) { + r.VerifyHeader.OriginSignature.Key = []byte("not a public key") + }, + }, + {err: "verify body signature: invalid scheme -1", + corrupt: func(r *apicontainer.DeleteResponse) { r.VerifyHeader.BodySignature.Scheme = -1 }, + }, + {err: "verify body signature: unsupported scheme 3", + corrupt: func(r *apicontainer.DeleteResponse) { r.VerifyHeader.BodySignature.Scheme = 3 }, + }, + {err: "verify signature of the meta header: unsupported scheme 3", + corrupt: func(r *apicontainer.DeleteResponse) { r.VerifyHeader.MetaSignature.Scheme = 3 }, + }, + {err: "verify signature of the origin verification header: unsupported scheme 3", + corrupt: func(r *apicontainer.DeleteResponse) { r.VerifyHeader.OriginSignature.Scheme = 3 }, + }, + {err: "verify body signature: 
signature mismatch", + corrupt: func(r *apicontainer.DeleteResponse) { r.VerifyHeader.BodySignature.Sign[0]++ }, + }, + {err: "verify signature of the meta header: signature mismatch", + corrupt: func(r *apicontainer.DeleteResponse) { r.VerifyHeader.MetaSignature.Sign[0]++ }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apicontainer.DeleteResponse) { r.VerifyHeader.OriginSignature.Sign[0]++ }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apicontainer.DeleteResponse) { + r.VerifyHeader.BodySignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the meta header: signature mismatch", + corrupt: func(r *apicontainer.DeleteResponse) { + r.VerifyHeader.MetaSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apicontainer.DeleteResponse) { + r.VerifyHeader.OriginSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + } { + srv := srv + srv.corruptRespSig = testCase.corrupt + assertErr := func(err error) { + require.ErrorContains(t, err, errResponseSignature, [2]any{i, testCase}) + require.ErrorContains(t, err, testCase.err, [2]any{i, testCase}) + } + c, handlerCalled := dial(t, &srv, assertErr) + err := c.DeleteContainer(ctx, srv.cnr, srv.removerSigner, DeleteContainerOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + } + }) + t.Run("invalid response status", func(t *testing.T) { + srv := srv + srv.modifyResp = func(r *apicontainer.DeleteResponse) { + r.MetaHeader.Status = &status.Status{Code: status.InternalServerError, Details: make([]*status.Status_Detail, 1)} + } + assertErr := func(err error) { + require.ErrorContains(t, err, errInvalidResponseStatus) + require.ErrorContains(t, err, "details attached but not 
supported") + } + c, handlerCalled := dial(t, &srv, assertErr) + err := c.DeleteContainer(ctx, srv.cnr, srv.removerSigner, DeleteContainerOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("status errors", func(t *testing.T) { + for _, testCase := range []struct { + code uint32 + errConst error + errVar any + }{ + {code: 1 << 32 / 2}, + {code: status.InternalServerError, errConst: apistatus.ErrServerInternal, errVar: new(apistatus.InternalServerError)}, + {code: status.SignatureVerificationFail, errConst: apistatus.ErrSignatureVerification, errVar: new(apistatus.SignatureVerificationFailure)}, + } { + srv := srv + srv.modifyResp = func(r *apicontainer.DeleteResponse) { + r.MetaHeader.Status = &status.Status{Code: testCase.code, Message: "any message"} + } + assertErr := func(err error) { + require.ErrorIs(t, err, apistatus.Error, testCase) + require.ErrorContains(t, err, "any message", testCase) + if testCase.errConst != nil { + require.ErrorIs(t, err, testCase.errConst, testCase) + } + if testCase.errVar != nil { + require.ErrorAs(t, err, testCase.errVar, testCase) + } + } + c, handlerCalled := dial(t, &srv, assertErr) + err := c.DeleteContainer(ctx, srv.cnr, srv.removerSigner, DeleteContainerOptions{}) + assertErr(err) + require.True(t, *handlerCalled, testCase) + } + }) + }) + t.Run("response info handler", func(t *testing.T) { + t.Run("OK", func(t *testing.T) { + srv := srv + srv.epoch = 3598503 + assertErr := func(err error) { require.NoError(t, err) } + respHandlerCalled := false + c, reqHandlerCalled := _dial(t, &srv, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + respHandlerCalled = true + require.EqualValues(t, 3598503, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return nil + }) + }) + err := c.DeleteContainer(ctx, srv.cnr, srv.removerSigner, DeleteContainerOptions{}) + assertErr(err) + require.True(t, 
respHandlerCalled) + require.True(t, *reqHandlerCalled) + }) + t.Run("fail", func(t *testing.T) { + srv := srv + srv.epoch = 4386380643 + assertErr := func(err error) { require.ErrorContains(t, err, "intercept response info: some handler error") } + respHandlerCalled := false + c, reqHandlerCalled := _dial(t, &srv, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + if !respHandlerCalled { // dial + respHandlerCalled = true + return nil + } + require.EqualValues(t, 4386380643, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return errors.New("some handler error") + }) + }) + err := c.DeleteContainer(ctx, srv.cnr, srv.removerSigner, DeleteContainerOptions{}) + assertErr(err) + require.True(t, respHandlerCalled) + require.True(t, *reqHandlerCalled) + }) + }) +} + +type getEACLServer struct { + noOtherContainerCalls + // client + cnr cid.ID + clientSigScheme neofscrypto.Scheme + clientPubKey []byte + // server + sleepDur time.Duration + endpointInfoOnDialServer + eacl eacl.Table + errTransport error + modifyResp func(*apicontainer.GetExtendedACLResponse) + corruptRespSig func(*apicontainer.GetExtendedACLResponse) +} + +func (x getEACLServer) GetExtendedACL(ctx context.Context, req *apicontainer.GetExtendedACLRequest) (*apicontainer.GetExtendedACLResponse, error) { + if x.sleepDur > 0 { + time.Sleep(x.sleepDur) + } + if x.errTransport != nil { + return nil, x.errTransport + } + var sts status.Status + resp := apicontainer.GetExtendedACLResponse{ + MetaHeader: &apisession.ResponseMetaHeader{Status: &sts, Epoch: x.epoch}, + } + var err error + var cnr cid.ID + if ctx == nil { + sts.Code, sts.Message = status.InternalServerError, "nil context" + } else if req == nil { + sts.Code, sts.Message = status.InternalServerError, "nil request" + } else if err = neofscrypto.VerifyRequest(req, req.Body); err != nil { + sts.Code, sts.Message = 
status.SignatureVerificationFail, err.Error() + } else if req.VerifyHeader.BodySignature.Scheme != refs.SignatureScheme(x.clientSigScheme) || + !bytes.Equal(req.VerifyHeader.BodySignature.Key, x.clientPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request body signature credentials" + } else if req.VerifyHeader.MetaSignature.Scheme != refs.SignatureScheme(x.clientSigScheme) || + !bytes.Equal(req.VerifyHeader.MetaSignature.Key, x.clientPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request meta header signature credentials" + } else if req.VerifyHeader.OriginSignature.Scheme != refs.SignatureScheme(x.clientSigScheme) || + !bytes.Equal(req.VerifyHeader.OriginSignature.Key, x.clientPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected origin request verification header signature credentials" + } else if req.MetaHeader != nil { + sts.Code, sts.Message = status.InternalServerError, "meta header is set" + } else if req.Body == nil { + sts.Code, sts.Message = status.InternalServerError, "missing request body" + } else if req.Body.ContainerId == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request body: missing container" + } else if err = cnr.ReadFromV2(req.Body.ContainerId); err != nil { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request body: invalid container field: %s", err) + } else if cnr != x.cnr { + sts.Code, sts.Message = status.InternalServerError, "[test] wrong container" + } else { + resp.MetaHeader.Status = nil + resp.Body = &apicontainer.GetExtendedACLResponse_Body{ + Eacl: new(apiacl.EACLTable), + Signature: &refs.SignatureRFC6979{ + Key: []byte("any_public_key"), + Sign: []byte("any_signature"), + }, + SessionToken: &apisession.SessionToken{ + Body: &apisession.SessionToken_Body{ + Id: []byte("any_ID"), + OwnerId: &refs.OwnerID{Value: []byte("any_owner")}, + Lifetime: 
&apisession.SessionToken_Body_TokenLifetime{ + Exp: rand.Uint64(), + Nbf: rand.Uint64(), + Iat: rand.Uint64(), + }, + SessionKey: []byte("any_session_key"), + Context: &apisession.SessionToken_Body_Object{ + Object: &apisession.ObjectSessionContext{ + Verb: apisession.ObjectSessionContext_Verb(rand.Int31()), + Target: &apisession.ObjectSessionContext_Target{ + Container: &refs.ContainerID{Value: []byte("any_container")}, + Objects: []*refs.ObjectID{{Value: []byte("any_object1")}, {Value: []byte("any_object2")}}, + }, + }, + }, + }, + Signature: &refs.Signature{ + Key: []byte("any_public_key"), + Sign: []byte("any_signature"), + Scheme: refs.SignatureScheme(rand.Int31()), }, }, } + x.eacl.WriteToV2(resp.Body.Eacl) + } + if x.modifyResp != nil { + x.modifyResp(&resp) + } + resp.VerifyHeader, err = neofscrypto.SignResponse(x.serverSigner, &resp, resp.Body, nil) + if err != nil { + return nil, fmt.Errorf("sign response: %w", err) + } + if x.corruptRespSig != nil { + x.corruptRespSig(&resp) + } + return &resp, nil +} - for _, test := range tt { - t.Run(test.name, func(t *testing.T) { - require.ErrorIs(t, test.methodCall(), ErrMissingSigner) +func TestClient_GetEACL(t *testing.T) { + ctx := context.Background() + var srv getEACLServer + srv.sleepDur = 10 * time.Millisecond + srv.serverSigner = neofscryptotest.RandomSigner() + srv.latestVersion = versiontest.Version() + srv.nodeInfo = netmaptest.NodeInfo() + srv.nodeInfo.SetPublicKey(neofscrypto.PublicKeyBytes(srv.serverSigner.Public())) + srv.cnr = cidtest.ID() + srv.eacl = eacltest.Table() + _dial := func(t testing.TB, srv *getEACLServer, assertErr func(error), customizeOpts func(*Options)) (*Client, *bool) { + var opts Options + var handlerCalled bool + opts.SetAPIRequestResultHandler(func(nodeKey []byte, endpoint string, op stat.Method, dur time.Duration, err error) { + handlerCalled = true + require.Equal(t, srv.nodeInfo.PublicKey(), nodeKey) + require.Equal(t, "localhost:8080", endpoint) + require.Equal(t, 
stat.MethodContainerEACL, op) + require.Greater(t, dur, srv.sleepDur) + assertErr(err) + }) + if customizeOpts != nil { + customizeOpts(&opts) + } + + c, err := New(anyValidURI, opts) + require.NoError(t, err) + srv.clientSigScheme = c.signer.Scheme() + srv.clientPubKey = neofscrypto.PublicKeyBytes(c.signer.Public()) + + conn := bufconn.Listen(10 << 10) + gs := grpc.NewServer() + apinetmap.RegisterNetmapServiceServer(gs, srv) + apicontainer.RegisterContainerServiceServer(gs, srv) + go func() { _ = gs.Serve(conn) }() + t.Cleanup(gs.Stop) + + c.dial = func(ctx context.Context, _ string) (net.Conn, error) { return conn.DialContext(ctx) } + require.NoError(t, c.Dial(ctx)) + + return c, &handlerCalled + } + dial := func(t testing.TB, srv *getEACLServer, assertErr func(error)) (*Client, *bool) { + return _dial(t, srv, assertErr, nil) + } + t.Run("OK", func(t *testing.T) { + srv := srv + assertErr := func(err error) { require.NoError(t, err) } + c, handlerCalled := dial(t, &srv, assertErr) + res, err := c.GetEACL(ctx, srv.cnr, GetEACLOptions{}) + assertErr(err) + require.Equal(t, srv.eacl, res) + require.True(t, *handlerCalled) + }) + t.Run("fail", func(t *testing.T) { + t.Run("sign request", func(t *testing.T) { + srv := srv + srv.sleepDur = 0 + assertErr := func(err error) { require.ErrorContains(t, err, errSignRequest) } + c, handlerCalled := dial(t, &srv, assertErr) + c.signer = neofscryptotest.FailSigner(c.signer) + _, err := c.GetEACL(ctx, srv.cnr, GetEACLOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("transport", func(t *testing.T) { + srv := srv + srv.errTransport = errors.New("any transport failure") + assertErr := func(err error) { + require.ErrorContains(t, err, errTransport) + require.ErrorContains(t, err, "any transport failure") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetEACL(ctx, srv.cnr, GetEACLOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("invalid response signature", func(t 
*testing.T) { + for i, testCase := range []struct { + err string + corrupt func(*apicontainer.GetExtendedACLResponse) + }{ + {err: "missing verification header", + corrupt: func(r *apicontainer.GetExtendedACLResponse) { r.VerifyHeader = nil }, + }, + {err: "missing body signature", + corrupt: func(r *apicontainer.GetExtendedACLResponse) { r.VerifyHeader.BodySignature = nil }, + }, + {err: "missing signature of the meta header", + corrupt: func(r *apicontainer.GetExtendedACLResponse) { r.VerifyHeader.MetaSignature = nil }, + }, + {err: "missing signature of the origin verification header", + corrupt: func(r *apicontainer.GetExtendedACLResponse) { r.VerifyHeader.OriginSignature = nil }, + }, + {err: "verify body signature: missing public key", + corrupt: func(r *apicontainer.GetExtendedACLResponse) { r.VerifyHeader.BodySignature.Key = nil }, + }, + {err: "verify signature of the meta header: missing public key", + corrupt: func(r *apicontainer.GetExtendedACLResponse) { r.VerifyHeader.MetaSignature.Key = nil }, + }, + {err: "verify signature of the origin verification header: missing public key", + corrupt: func(r *apicontainer.GetExtendedACLResponse) { r.VerifyHeader.OriginSignature.Key = nil }, + }, + {err: "verify body signature: decode public key from binary", + corrupt: func(r *apicontainer.GetExtendedACLResponse) { + r.VerifyHeader.BodySignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the meta header: decode public key from binary", + corrupt: func(r *apicontainer.GetExtendedACLResponse) { + r.VerifyHeader.MetaSignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the origin verification header: decode public key from binary", + corrupt: func(r *apicontainer.GetExtendedACLResponse) { + r.VerifyHeader.OriginSignature.Key = []byte("not a public key") + }, + }, + {err: "verify body signature: invalid scheme -1", + corrupt: func(r *apicontainer.GetExtendedACLResponse) { r.VerifyHeader.BodySignature.Scheme = 
-1 }, + }, + {err: "verify body signature: unsupported scheme 3", + corrupt: func(r *apicontainer.GetExtendedACLResponse) { r.VerifyHeader.BodySignature.Scheme = 3 }, + }, + {err: "verify signature of the meta header: unsupported scheme 3", + corrupt: func(r *apicontainer.GetExtendedACLResponse) { r.VerifyHeader.MetaSignature.Scheme = 3 }, + }, + {err: "verify signature of the origin verification header: unsupported scheme 3", + corrupt: func(r *apicontainer.GetExtendedACLResponse) { r.VerifyHeader.OriginSignature.Scheme = 3 }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apicontainer.GetExtendedACLResponse) { r.VerifyHeader.BodySignature.Sign[0]++ }, + }, + {err: "verify signature of the meta header: signature mismatch", + corrupt: func(r *apicontainer.GetExtendedACLResponse) { r.VerifyHeader.MetaSignature.Sign[0]++ }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apicontainer.GetExtendedACLResponse) { r.VerifyHeader.OriginSignature.Sign[0]++ }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apicontainer.GetExtendedACLResponse) { + r.VerifyHeader.BodySignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the meta header: signature mismatch", + corrupt: func(r *apicontainer.GetExtendedACLResponse) { + r.VerifyHeader.MetaSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apicontainer.GetExtendedACLResponse) { + r.VerifyHeader.OriginSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + } { + srv := srv + srv.corruptRespSig = testCase.corrupt + assertErr := func(err error) { + require.ErrorContains(t, err, errResponseSignature, [2]any{i, testCase}) + require.ErrorContains(t, err, testCase.err, 
[2]any{i, testCase}) + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetEACL(ctx, srv.cnr, GetEACLOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + } + }) + t.Run("invalid response status", func(t *testing.T) { + srv := srv + srv.modifyResp = func(r *apicontainer.GetExtendedACLResponse) { + r.MetaHeader.Status = &status.Status{Code: status.InternalServerError, Details: make([]*status.Status_Detail, 1)} + } + assertErr := func(err error) { + require.ErrorContains(t, err, errInvalidResponseStatus) + require.ErrorContains(t, err, "details attached but not supported") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetEACL(ctx, srv.cnr, GetEACLOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("status errors", func(t *testing.T) { + for _, testCase := range []struct { + code uint32 + errConst error + errVar any + }{ + {code: 1 << 32 / 2}, + {code: status.InternalServerError, errConst: apistatus.ErrServerInternal, errVar: new(apistatus.InternalServerError)}, + {code: status.SignatureVerificationFail, errConst: apistatus.ErrSignatureVerification, errVar: new(apistatus.SignatureVerificationFailure)}, + {code: status.ContainerNotFound, errConst: apistatus.ErrContainerNotFound, errVar: new(apistatus.ContainerNotFound)}, + {code: status.EACLNotFound, errConst: apistatus.ErrEACLNotFound, errVar: new(apistatus.EACLNotFound)}, + } { + srv := srv + srv.modifyResp = func(r *apicontainer.GetExtendedACLResponse) { + r.MetaHeader.Status = &status.Status{Code: testCase.code, Message: "any message"} + } + assertErr := func(err error) { + require.ErrorIs(t, err, apistatus.Error, testCase) + require.ErrorContains(t, err, "any message", testCase) + if testCase.errConst != nil { + require.ErrorIs(t, err, testCase.errConst, testCase) + } + if testCase.errVar != nil { + require.ErrorAs(t, err, testCase.errVar, testCase) + } + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetEACL(ctx, srv.cnr, 
GetEACLOptions{}) + assertErr(err) + require.True(t, *handlerCalled, testCase) + } + }) + t.Run("response body", func(t *testing.T) { + t.Run("missing", func(t *testing.T) { + srv := srv + assertErr := func(err error) { require.EqualError(t, err, "invalid response: missing body") } + c, handlerCalled := dial(t, &srv, assertErr) + srv.modifyResp = func(r *apicontainer.GetExtendedACLResponse) { r.Body = nil } + _, err := c.GetEACL(ctx, srv.cnr, GetEACLOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("missing eACL", func(t *testing.T) { + srv := srv + srv.modifyResp = func(r *apicontainer.GetExtendedACLResponse) { r.Body.Eacl = nil } + assertErr := func(err error) { + require.EqualError(t, err, "invalid response: invalid body: missing required field (eACL)") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetEACL(ctx, srv.cnr, GetEACLOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("invalid eACL", func(t *testing.T) { + testCases := []struct { + name string + err string + corrupt func(*apiacl.EACLTable) + }{ + {name: "container/empty", err: "invalid container: missing value field", corrupt: func(c *apiacl.EACLTable) { + c.ContainerId = new(refs.ContainerID) + }}, + {name: "container/wrong length", err: "invalid container: invalid value length 31", corrupt: func(c *apiacl.EACLTable) { + c.ContainerId.Value = make([]byte, 31) + }}, + {name: "records/nil", err: "missing records", corrupt: func(c *apiacl.EACLTable) { + c.Records = nil + }}, + {name: "records/empty", err: "missing records", corrupt: func(c *apiacl.EACLTable) { + c.Records = []*apiacl.EACLRecord{} + }}, + {name: "records/targets/nil", err: "invalid record #1: missing target subjects", corrupt: func(c *apiacl.EACLTable) { + c.Records[1].Targets = nil + }}, + {name: "records/targets/empty", err: "invalid record #1: missing target subjects", corrupt: func(c *apiacl.EACLTable) { + c.Records[1].Targets = []*apiacl.EACLRecord_Target{} + }}, + 
{name: "records/targets/neither keys nor role", err: "invalid record #1: invalid target #2: role and public keys are not mutually exclusive", corrupt: func(c *apiacl.EACLTable) { + c.Records[1].Targets[2].Role, c.Records[1].Targets[2].Keys = 0, nil + }}, + {name: "records/targets/key and role", err: "invalid record #1: invalid target #2: role and public keys are not mutually exclusive", corrupt: func(c *apiacl.EACLTable) { + c.Records[1].Targets[2].Role, c.Records[1].Targets[2].Keys = 1, make([][]byte, 1) + }}, + {name: "filters/missing key", err: "invalid record #1: invalid filter #2: missing key", corrupt: func(c *apiacl.EACLTable) { + c.Records[1].Filters[2].Key = "" + }}, + } + for i := range testCases { + srv := srv + rs := eacltest.NRecords(3) + rs[1].SetTargets(eacltest.NTargets(3)) + rs[1].SetFilters(eacltest.NFilters(3)) + srv.eacl.SetRecords(rs) + srv.modifyResp = func(r *apicontainer.GetExtendedACLResponse) { + testCases[i].corrupt(r.Body.Eacl) + } + assertErr := func(err error) { + require.EqualError(t, err, fmt.Sprintf("invalid response: invalid body: invalid field (eACL): %s", testCases[i].err)) + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetEACL(ctx, srv.cnr, GetEACLOptions{}) + assertErr(err) + require.True(t, *handlerCalled, testCases[i].name) + } }) + }) + }) + t.Run("response info handler", func(t *testing.T) { + t.Run("OK", func(t *testing.T) { + srv := srv + srv.epoch = 3598503 + assertErr := func(err error) { require.NoError(t, err) } + respHandlerCalled := false + c, reqHandlerCalled := _dial(t, &srv, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + respHandlerCalled = true + require.EqualValues(t, 3598503, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return nil + }) + }) + _, err := c.GetEACL(ctx, srv.cnr, GetEACLOptions{}) + assertErr(err) + require.True(t, respHandlerCalled) + require.True(t, 
*reqHandlerCalled) + }) + t.Run("fail", func(t *testing.T) { + srv := srv + srv.epoch = 4386380643 + assertErr := func(err error) { require.ErrorContains(t, err, "intercept response info: some handler error") } + respHandlerCalled := false + c, reqHandlerCalled := _dial(t, &srv, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + if !respHandlerCalled { // dial + respHandlerCalled = true + return nil + } + require.EqualValues(t, 4386380643, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return errors.New("some handler error") + }) + }) + _, err := c.GetEACL(ctx, srv.cnr, GetEACLOptions{}) + assertErr(err) + require.True(t, respHandlerCalled) + require.True(t, *reqHandlerCalled) + }) + }) +} + +type setEACLServer struct { + noOtherContainerCalls + // client + eacl eacl.Table + setterSigner neofscrypto.Signer + session *session.Container + // server + sleepDur time.Duration + endpointInfoOnDialServer + errTransport error + modifyResp func(*apicontainer.SetExtendedACLResponse) + corruptRespSig func(*apicontainer.SetExtendedACLResponse) +} + +func (x setEACLServer) SetExtendedACL(ctx context.Context, req *apicontainer.SetExtendedACLRequest) (*apicontainer.SetExtendedACLResponse, error) { + if x.sleepDur > 0 { + time.Sleep(x.sleepDur) + } + if x.errTransport != nil { + return nil, x.errTransport + } + var sts status.Status + resp := apicontainer.SetExtendedACLResponse{ + MetaHeader: &apisession.ResponseMetaHeader{Status: &sts, Epoch: x.epoch}, + } + var err error + var eACL eacl.Table + sigScheme := refs.SignatureScheme(x.setterSigner.Scheme()) + creatorPubKey := neofscrypto.PublicKeyBytes(x.setterSigner.Public()) + if ctx == nil { + sts.Code, sts.Message = status.InternalServerError, "nil context" + } else if req == nil { + sts.Code, sts.Message = status.InternalServerError, "nil request" + } else if err = neofscrypto.VerifyRequest(req, req.Body); err 
!= nil { + sts.Code, sts.Message = status.SignatureVerificationFail, err.Error() + } else if req.VerifyHeader.BodySignature.Scheme != sigScheme || + !bytes.Equal(req.VerifyHeader.BodySignature.Key, creatorPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request body signature credentials" + } else if req.VerifyHeader.MetaSignature.Scheme != sigScheme || + !bytes.Equal(req.VerifyHeader.MetaSignature.Key, creatorPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request meta header signature credentials" + } else if req.VerifyHeader.OriginSignature.Scheme != sigScheme || + !bytes.Equal(req.VerifyHeader.OriginSignature.Key, creatorPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected origin request verification header signature credentials" + } else if req.Body == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: missing body" + } else if req.Body.Eacl == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: invalid body: missing eACL" + } else if err = eACL.ReadFromV2(req.Body.Eacl); err != nil { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request: invalid body: invalid eACL: %s", err) + } else if !bytes.Equal(eACL.Marshal(), x.eacl.Marshal()) { + sts.Code, sts.Message = status.InternalServerError, "[test] wrong eACL" + } else if req.Body.Signature == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: invalid body: missing ID signature" + } else if !bytes.Equal(req.Body.Signature.Key, creatorPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] public key in request body differs with the creator's one" + } else if !x.setterSigner.Public().Verify(eACL.Marshal(), req.Body.Signature.Sign) { + sts.Code, sts.Message = status.InternalServerError, "[test] wrong eACL signature" + } else if x.session != nil { + var sc session.Container + if req.MetaHeader 
== nil { + sts.Code, sts.Message = status.InternalServerError, "[test] missing request meta header" + } else if req.MetaHeader.SessionToken == nil { + sts.Code, sts.Message = status.InternalServerError, "[test] missing session token" + } else if err = sc.ReadFromV2(req.MetaHeader.SessionToken); err != nil { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request: invalid meta header: invalid session token: %v", err) + } else if !bytes.Equal(sc.Marshal(), x.session.Marshal()) { + sts.Code, sts.Message = status.InternalServerError, "[test] session token in request differs with the input one" } + } else if req.MetaHeader != nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: meta header is set" + } + if sts.Code == 0 { + resp.MetaHeader.Status = nil + } + if x.modifyResp != nil { + x.modifyResp(&resp) + } + resp.VerifyHeader, err = neofscrypto.SignResponse(x.serverSigner, &resp, resp.Body, nil) + if err != nil { + return nil, fmt.Errorf("sign response: %w", err) + } + if x.corruptRespSig != nil { + x.corruptRespSig(&resp) + } + return &resp, nil +} + +func TestClient_SetEACL(t *testing.T) { + ctx := context.Background() + var srv setEACLServer + srv.sleepDur = 10 * time.Millisecond + srv.serverSigner = neofscryptotest.RandomSigner() + srv.latestVersion = versiontest.Version() + srv.nodeInfo = netmaptest.NodeInfo() + srv.nodeInfo.SetPublicKey(neofscrypto.PublicKeyBytes(srv.serverSigner.Public())) + srv.eacl = eacltest.Table() + srv.setterSigner = neofscryptotest.RandomSignerRFC6979() + _dial := func(t testing.TB, srv *setEACLServer, assertErr func(error), customizeOpts func(*Options)) (*Client, *bool) { + var opts Options + var handlerCalled bool + opts.SetAPIRequestResultHandler(func(nodeKey []byte, endpoint string, op stat.Method, dur time.Duration, err error) { + handlerCalled = true + require.Equal(t, srv.nodeInfo.PublicKey(), nodeKey) + require.Equal(t, "localhost:8080", endpoint) + require.Equal(t, 
stat.MethodContainerSetEACL, op) + require.Greater(t, dur, srv.sleepDur) + assertErr(err) + }) + if customizeOpts != nil { + customizeOpts(&opts) + } + + c, err := New(anyValidURI, opts) + require.NoError(t, err) + + conn := bufconn.Listen(10 << 10) + gs := grpc.NewServer() + apinetmap.RegisterNetmapServiceServer(gs, srv) + apicontainer.RegisterContainerServiceServer(gs, srv) + go func() { _ = gs.Serve(conn) }() + t.Cleanup(gs.Stop) + + c.dial = func(ctx context.Context, _ string) (net.Conn, error) { return conn.DialContext(ctx) } + require.NoError(t, c.Dial(ctx)) + + return c, &handlerCalled + } + dial := func(t testing.TB, srv *setEACLServer, assertErr func(error)) (*Client, *bool) { + return _dial(t, srv, assertErr, nil) + } + t.Run("invalid signer", func(t *testing.T) { + c, err := New(anyValidURI, Options{}) + require.NoError(t, err) + err = c.SetEACL(ctx, srv.eacl, nil, SetEACLOptions{}) + require.ErrorIs(t, err, errMissingSigner) + err = c.SetEACL(ctx, srv.eacl, neofsecdsa.Signer(neofscryptotest.ECDSAPrivateKey()), SetEACLOptions{}) + require.EqualError(t, err, "wrong signature scheme: ECDSA_SHA512 instead of ECDSA_RFC6979_SHA256") + }) + t.Run("unbound container", func(t *testing.T) { + c, err := New(anyValidURI, Options{}) + require.NoError(t, err) + err = c.SetEACL(ctx, eacl.Table{}, srv.setterSigner, SetEACLOptions{}) + require.EqualError(t, err, "missing container in the eACL") + }) + t.Run("OK", func(t *testing.T) { + srv := srv + assertErr := func(err error) { require.NoError(t, err) } + c, handlerCalled := dial(t, &srv, assertErr) + err := c.SetEACL(ctx, srv.eacl, srv.setterSigner, SetEACLOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + t.Run("with session", func(t *testing.T) { + srv := srv + sc := sessiontest.Container() + srv.session = &sc + assertErr := func(err error) { require.NoError(t, err) } + c, handlerCalled := dial(t, &srv, assertErr) + var opts SetEACLOptions + opts.WithinSession(sc) + err := c.SetEACL(ctx, srv.eacl, 
srv.setterSigner, opts) + assertErr(err) + require.True(t, *handlerCalled) + }) + }) + t.Run("fail", func(t *testing.T) { + t.Run("sign eACL", func(t *testing.T) { + srv := srv + srv.sleepDur = 0 + assertErr := func(err error) { require.ErrorContains(t, err, "sign eACL") } + c, handlerCalled := dial(t, &srv, assertErr) + err := c.SetEACL(ctx, srv.eacl, neofscryptotest.FailSigner(srv.setterSigner), SetEACLOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("sign request", func(t *testing.T) { + srv := srv + srv.sleepDur = 0 + assertErr := func(err error) { require.ErrorContains(t, err, errSignRequest) } + c, handlerCalled := dial(t, &srv, assertErr) + err := c.SetEACL(ctx, srv.eacl, newDisposableSigner(srv.setterSigner), SetEACLOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("transport", func(t *testing.T) { + srv := srv + srv.errTransport = errors.New("any transport failure") + assertErr := func(err error) { + require.ErrorContains(t, err, errTransport) + require.ErrorContains(t, err, "any transport failure") + } + c, handlerCalled := dial(t, &srv, assertErr) + err := c.SetEACL(ctx, srv.eacl, srv.setterSigner, SetEACLOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("invalid response signature", func(t *testing.T) { + for i, testCase := range []struct { + err string + corrupt func(*apicontainer.SetExtendedACLResponse) + }{ + {err: "missing verification header", + corrupt: func(r *apicontainer.SetExtendedACLResponse) { r.VerifyHeader = nil }, + }, + {err: "missing body signature", + corrupt: func(r *apicontainer.SetExtendedACLResponse) { r.VerifyHeader.BodySignature = nil }, + }, + {err: "missing signature of the meta header", + corrupt: func(r *apicontainer.SetExtendedACLResponse) { r.VerifyHeader.MetaSignature = nil }, + }, + {err: "missing signature of the origin verification header", + corrupt: func(r *apicontainer.SetExtendedACLResponse) { r.VerifyHeader.OriginSignature = nil }, + }, + 
{err: "verify body signature: missing public key", + corrupt: func(r *apicontainer.SetExtendedACLResponse) { r.VerifyHeader.BodySignature.Key = nil }, + }, + {err: "verify signature of the meta header: missing public key", + corrupt: func(r *apicontainer.SetExtendedACLResponse) { r.VerifyHeader.MetaSignature.Key = nil }, + }, + {err: "verify signature of the origin verification header: missing public key", + corrupt: func(r *apicontainer.SetExtendedACLResponse) { r.VerifyHeader.OriginSignature.Key = nil }, + }, + {err: "verify body signature: decode public key from binary", + corrupt: func(r *apicontainer.SetExtendedACLResponse) { + r.VerifyHeader.BodySignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the meta header: decode public key from binary", + corrupt: func(r *apicontainer.SetExtendedACLResponse) { + r.VerifyHeader.MetaSignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the origin verification header: decode public key from binary", + corrupt: func(r *apicontainer.SetExtendedACLResponse) { + r.VerifyHeader.OriginSignature.Key = []byte("not a public key") + }, + }, + {err: "verify body signature: invalid scheme -1", + corrupt: func(r *apicontainer.SetExtendedACLResponse) { r.VerifyHeader.BodySignature.Scheme = -1 }, + }, + {err: "verify body signature: unsupported scheme 3", + corrupt: func(r *apicontainer.SetExtendedACLResponse) { r.VerifyHeader.BodySignature.Scheme = 3 }, + }, + {err: "verify signature of the meta header: unsupported scheme 3", + corrupt: func(r *apicontainer.SetExtendedACLResponse) { r.VerifyHeader.MetaSignature.Scheme = 3 }, + }, + {err: "verify signature of the origin verification header: unsupported scheme 3", + corrupt: func(r *apicontainer.SetExtendedACLResponse) { r.VerifyHeader.OriginSignature.Scheme = 3 }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apicontainer.SetExtendedACLResponse) { r.VerifyHeader.BodySignature.Sign[0]++ }, + }, + 
{err: "verify signature of the meta header: signature mismatch", + corrupt: func(r *apicontainer.SetExtendedACLResponse) { r.VerifyHeader.MetaSignature.Sign[0]++ }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apicontainer.SetExtendedACLResponse) { r.VerifyHeader.OriginSignature.Sign[0]++ }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apicontainer.SetExtendedACLResponse) { + r.VerifyHeader.BodySignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the meta header: signature mismatch", + corrupt: func(r *apicontainer.SetExtendedACLResponse) { + r.VerifyHeader.MetaSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apicontainer.SetExtendedACLResponse) { + r.VerifyHeader.OriginSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + } { + srv := srv + srv.corruptRespSig = testCase.corrupt + assertErr := func(err error) { + require.ErrorContains(t, err, errResponseSignature, [2]any{i, testCase}) + require.ErrorContains(t, err, testCase.err, [2]any{i, testCase}) + } + c, handlerCalled := dial(t, &srv, assertErr) + err := c.SetEACL(ctx, srv.eacl, srv.setterSigner, SetEACLOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + } + }) + t.Run("invalid response status", func(t *testing.T) { + srv := srv + srv.modifyResp = func(r *apicontainer.SetExtendedACLResponse) { + r.MetaHeader.Status = &status.Status{Code: status.InternalServerError, Details: make([]*status.Status_Detail, 1)} + } + assertErr := func(err error) { + require.ErrorContains(t, err, errInvalidResponseStatus) + require.ErrorContains(t, err, "details attached but not supported") + } + c, handlerCalled := dial(t, &srv, assertErr) + err := c.SetEACL(ctx, srv.eacl, 
srv.setterSigner, SetEACLOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("status errors", func(t *testing.T) { + for _, testCase := range []struct { + code uint32 + errConst error + errVar any + }{ + {code: 1 << 32 / 2}, + {code: status.InternalServerError, errConst: apistatus.ErrServerInternal, errVar: new(apistatus.InternalServerError)}, + {code: status.SignatureVerificationFail, errConst: apistatus.ErrSignatureVerification, errVar: new(apistatus.SignatureVerificationFailure)}, + {code: status.ContainerNotFound, errConst: apistatus.ErrContainerNotFound, errVar: new(apistatus.ContainerNotFound)}, + } { + srv := srv + srv.modifyResp = func(r *apicontainer.SetExtendedACLResponse) { + r.MetaHeader.Status = &status.Status{Code: testCase.code, Message: "any message"} + } + assertErr := func(err error) { + require.ErrorIs(t, err, apistatus.Error, testCase) + require.ErrorContains(t, err, "any message", testCase) + if testCase.errConst != nil { + require.ErrorIs(t, err, testCase.errConst, testCase) + } + if testCase.errVar != nil { + require.ErrorAs(t, err, testCase.errVar, testCase) + } + } + c, handlerCalled := dial(t, &srv, assertErr) + err := c.SetEACL(ctx, srv.eacl, srv.setterSigner, SetEACLOptions{}) + assertErr(err) + require.True(t, *handlerCalled, testCase) + } + }) + }) + t.Run("response info handler", func(t *testing.T) { + t.Run("OK", func(t *testing.T) { + srv := srv + srv.epoch = 3598503 + assertErr := func(err error) { require.NoError(t, err) } + respHandlerCalled := false + c, reqHandlerCalled := _dial(t, &srv, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + respHandlerCalled = true + require.EqualValues(t, 3598503, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return nil + }) + }) + err := c.SetEACL(ctx, srv.eacl, srv.setterSigner, SetEACLOptions{}) + assertErr(err) + require.True(t, respHandlerCalled) + 
require.True(t, *reqHandlerCalled) + }) + t.Run("fail", func(t *testing.T) { + srv := srv + srv.epoch = 4386380643 + assertErr := func(err error) { require.ErrorContains(t, err, "intercept response info: some handler error") } + respHandlerCalled := false + c, reqHandlerCalled := _dial(t, &srv, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + if !respHandlerCalled { // dial + respHandlerCalled = true + return nil + } + require.EqualValues(t, 4386380643, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return errors.New("some handler error") + }) + }) + err := c.SetEACL(ctx, srv.eacl, srv.setterSigner, SetEACLOptions{}) + assertErr(err) + require.True(t, respHandlerCalled) + require.True(t, *reqHandlerCalled) + }) + }) +} + +type sendContainerSizeEstimationsServer struct { + noOtherContainerCalls + // client + estimations []container.SizeEstimation + clientSigScheme neofscrypto.Scheme + clientPubKey []byte + // server + sleepDur time.Duration + endpointInfoOnDialServer + errTransport error + modifyResp func(*apicontainer.AnnounceUsedSpaceResponse) + corruptRespSig func(*apicontainer.AnnounceUsedSpaceResponse) +} + +func (x sendContainerSizeEstimationsServer) AnnounceUsedSpace(ctx context.Context, req *apicontainer.AnnounceUsedSpaceRequest) (*apicontainer.AnnounceUsedSpaceResponse, error) { + if x.sleepDur > 0 { + time.Sleep(x.sleepDur) + } + if x.errTransport != nil { + return nil, x.errTransport + } + var sts status.Status + resp := apicontainer.AnnounceUsedSpaceResponse{ + MetaHeader: &apisession.ResponseMetaHeader{Status: &sts, Epoch: x.epoch}, + } + var err error + if ctx == nil { + sts.Code, sts.Message = status.InternalServerError, "nil context" + } else if req == nil { + sts.Code, sts.Message = status.InternalServerError, "nil request" + } else if err = neofscrypto.VerifyRequest(req, req.Body); err != nil { + sts.Code, sts.Message = 
status.SignatureVerificationFail, err.Error() + } else if req.VerifyHeader.BodySignature.Scheme != refs.SignatureScheme(x.clientSigScheme) || + !bytes.Equal(req.VerifyHeader.BodySignature.Key, x.clientPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request body signature credentials" + } else if req.VerifyHeader.MetaSignature.Scheme != refs.SignatureScheme(x.clientSigScheme) || + !bytes.Equal(req.VerifyHeader.MetaSignature.Key, x.clientPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request meta header signature credentials" + } else if req.VerifyHeader.OriginSignature.Scheme != refs.SignatureScheme(x.clientSigScheme) || + !bytes.Equal(req.VerifyHeader.OriginSignature.Key, x.clientPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected origin request verification header signature credentials" + } else if req.MetaHeader != nil { + sts.Code, sts.Message = status.InternalServerError, "meta header is set" + } else if req.Body == nil { + sts.Code, sts.Message = status.InternalServerError, "missing request body" + } else if len(req.Body.Announcements) == 0 { + sts.Code, sts.Message = status.InternalServerError, "invalid request body: missing estimations" + } else if len(req.Body.Announcements) != len(x.estimations) { + sts.Code, sts.Message = status.InternalServerError, "[test] invalid request body: wrong number of estimations" + } else { + var est container.SizeEstimation + for i := range req.Body.Announcements { + if req.Body.Announcements[i] == nil { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("nil estimation #%d", i) + break + } else if err = est.ReadFromV2(req.Body.Announcements[i]); err != nil { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid estimation #%d: %v", i, err) + break + } else if est != x.estimations[i] { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("[test] wrong estimation #%d", i) + break 
+ } + } + } + if x.modifyResp != nil { + x.modifyResp(&resp) + } + resp.VerifyHeader, err = neofscrypto.SignResponse(x.serverSigner, &resp, resp.Body, nil) + if err != nil { + return nil, fmt.Errorf("sign response: %w", err) + } + if x.corruptRespSig != nil { + x.corruptRespSig(&resp) + } + return &resp, nil +} + +func TestClient_SendContainerSizeEstimations(t *testing.T) { + ctx := context.Background() + var srv sendContainerSizeEstimationsServer + srv.sleepDur = 10 * time.Millisecond + srv.serverSigner = neofscryptotest.RandomSigner() + srv.latestVersion = versiontest.Version() + srv.nodeInfo = netmaptest.NodeInfo() + srv.nodeInfo.SetPublicKey(neofscrypto.PublicKeyBytes(srv.serverSigner.Public())) + srv.estimations = make([]container.SizeEstimation, 3) + for i := range srv.estimations { + srv.estimations[i] = containertest.SizeEstimation() + } + _dial := func(t testing.TB, srv *sendContainerSizeEstimationsServer, assertErr func(error), customizeOpts func(*Options)) (*Client, *bool) { + var opts Options + var handlerCalled bool + opts.SetAPIRequestResultHandler(func(nodeKey []byte, endpoint string, op stat.Method, dur time.Duration, err error) { + handlerCalled = true + require.Equal(t, srv.nodeInfo.PublicKey(), nodeKey) + require.Equal(t, "localhost:8080", endpoint) + require.Equal(t, stat.MethodContainerAnnounceUsedSpace, op) + require.Greater(t, dur, srv.sleepDur) + assertErr(err) + }) + if customizeOpts != nil { + customizeOpts(&opts) + } + + c, err := New(anyValidURI, opts) + require.NoError(t, err) + srv.clientSigScheme = c.signer.Scheme() + srv.clientPubKey = neofscrypto.PublicKeyBytes(c.signer.Public()) + + conn := bufconn.Listen(10 << 10) + gs := grpc.NewServer() + apinetmap.RegisterNetmapServiceServer(gs, srv) + apicontainer.RegisterContainerServiceServer(gs, srv) + go func() { _ = gs.Serve(conn) }() + t.Cleanup(gs.Stop) + + c.dial = func(ctx context.Context, _ string) (net.Conn, error) { return conn.DialContext(ctx) } + require.NoError(t, c.Dial(ctx)) + 
+ return c, &handlerCalled + } + dial := func(t testing.TB, srv *sendContainerSizeEstimationsServer, assertErr func(error)) (*Client, *bool) { + return _dial(t, srv, assertErr, nil) + } + t.Run("missing estimations", func(t *testing.T) { + c, err := New(anyValidURI, Options{}) + require.NoError(t, err) + err = c.SendContainerSizeEstimations(ctx, nil, SendContainerSizeEstimationsOptions{}) + require.EqualError(t, err, "missing estimations") + err = c.SendContainerSizeEstimations(ctx, []container.SizeEstimation{}, SendContainerSizeEstimationsOptions{}) + require.EqualError(t, err, "missing estimations") + }) + t.Run("OK", func(t *testing.T) { + srv := srv + assertErr := func(err error) { require.NoError(t, err) } + c, handlerCalled := dial(t, &srv, assertErr) + err := c.SendContainerSizeEstimations(ctx, srv.estimations, SendContainerSizeEstimationsOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("fail", func(t *testing.T) { + t.Run("sign request", func(t *testing.T) { + srv := srv + srv.sleepDur = 0 + assertErr := func(err error) { require.ErrorContains(t, err, errSignRequest) } + c, handlerCalled := dial(t, &srv, assertErr) + c.signer = neofscryptotest.FailSigner(c.signer) + err := c.SendContainerSizeEstimations(ctx, srv.estimations, SendContainerSizeEstimationsOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("transport", func(t *testing.T) { + srv := srv + srv.errTransport = errors.New("any transport failure") + assertErr := func(err error) { + require.ErrorContains(t, err, errTransport) + require.ErrorContains(t, err, "any transport failure") + } + c, handlerCalled := dial(t, &srv, assertErr) + err := c.SendContainerSizeEstimations(ctx, srv.estimations, SendContainerSizeEstimationsOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("invalid response signature", func(t *testing.T) { + for i, testCase := range []struct { + err string + corrupt func(*apicontainer.AnnounceUsedSpaceResponse) + 
}{ + {err: "missing verification header", + corrupt: func(r *apicontainer.AnnounceUsedSpaceResponse) { r.VerifyHeader = nil }, + }, + {err: "missing body signature", + corrupt: func(r *apicontainer.AnnounceUsedSpaceResponse) { r.VerifyHeader.BodySignature = nil }, + }, + {err: "missing signature of the meta header", + corrupt: func(r *apicontainer.AnnounceUsedSpaceResponse) { r.VerifyHeader.MetaSignature = nil }, + }, + {err: "missing signature of the origin verification header", + corrupt: func(r *apicontainer.AnnounceUsedSpaceResponse) { r.VerifyHeader.OriginSignature = nil }, + }, + {err: "verify body signature: missing public key", + corrupt: func(r *apicontainer.AnnounceUsedSpaceResponse) { r.VerifyHeader.BodySignature.Key = nil }, + }, + {err: "verify signature of the meta header: missing public key", + corrupt: func(r *apicontainer.AnnounceUsedSpaceResponse) { r.VerifyHeader.MetaSignature.Key = nil }, + }, + {err: "verify signature of the origin verification header: missing public key", + corrupt: func(r *apicontainer.AnnounceUsedSpaceResponse) { r.VerifyHeader.OriginSignature.Key = nil }, + }, + {err: "verify body signature: decode public key from binary", + corrupt: func(r *apicontainer.AnnounceUsedSpaceResponse) { + r.VerifyHeader.BodySignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the meta header: decode public key from binary", + corrupt: func(r *apicontainer.AnnounceUsedSpaceResponse) { + r.VerifyHeader.MetaSignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the origin verification header: decode public key from binary", + corrupt: func(r *apicontainer.AnnounceUsedSpaceResponse) { + r.VerifyHeader.OriginSignature.Key = []byte("not a public key") + }, + }, + {err: "verify body signature: invalid scheme -1", + corrupt: func(r *apicontainer.AnnounceUsedSpaceResponse) { r.VerifyHeader.BodySignature.Scheme = -1 }, + }, + {err: "verify body signature: unsupported scheme 3", + corrupt: func(r 
*apicontainer.AnnounceUsedSpaceResponse) { r.VerifyHeader.BodySignature.Scheme = 3 }, + }, + {err: "verify signature of the meta header: unsupported scheme 3", + corrupt: func(r *apicontainer.AnnounceUsedSpaceResponse) { r.VerifyHeader.MetaSignature.Scheme = 3 }, + }, + {err: "verify signature of the origin verification header: unsupported scheme 3", + corrupt: func(r *apicontainer.AnnounceUsedSpaceResponse) { r.VerifyHeader.OriginSignature.Scheme = 3 }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apicontainer.AnnounceUsedSpaceResponse) { r.VerifyHeader.BodySignature.Sign[0]++ }, + }, + {err: "verify signature of the meta header: signature mismatch", + corrupt: func(r *apicontainer.AnnounceUsedSpaceResponse) { r.VerifyHeader.MetaSignature.Sign[0]++ }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apicontainer.AnnounceUsedSpaceResponse) { r.VerifyHeader.OriginSignature.Sign[0]++ }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apicontainer.AnnounceUsedSpaceResponse) { + r.VerifyHeader.BodySignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the meta header: signature mismatch", + corrupt: func(r *apicontainer.AnnounceUsedSpaceResponse) { + r.VerifyHeader.MetaSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apicontainer.AnnounceUsedSpaceResponse) { + r.VerifyHeader.OriginSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + } { + srv := srv + srv.corruptRespSig = testCase.corrupt + assertErr := func(err error) { + require.ErrorContains(t, err, errResponseSignature, [2]any{i, testCase}) + require.ErrorContains(t, err, testCase.err, [2]any{i, testCase}) + } + c, handlerCalled := dial(t, &srv, 
assertErr) + err := c.SendContainerSizeEstimations(ctx, srv.estimations, SendContainerSizeEstimationsOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + } + }) + t.Run("invalid response status", func(t *testing.T) { + srv := srv + srv.modifyResp = func(r *apicontainer.AnnounceUsedSpaceResponse) { + r.MetaHeader.Status = &status.Status{Code: status.InternalServerError, Details: make([]*status.Status_Detail, 1)} + } + assertErr := func(err error) { + require.ErrorContains(t, err, errInvalidResponseStatus) + require.ErrorContains(t, err, "details attached but not supported") + } + c, handlerCalled := dial(t, &srv, assertErr) + err := c.SendContainerSizeEstimations(ctx, srv.estimations, SendContainerSizeEstimationsOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("status errors", func(t *testing.T) { + for _, testCase := range []struct { + code uint32 + errConst error + errVar any + }{ + {code: 1 << 32 / 2}, + {code: status.InternalServerError, errConst: apistatus.ErrServerInternal, errVar: new(apistatus.InternalServerError)}, + {code: status.SignatureVerificationFail, errConst: apistatus.ErrSignatureVerification, errVar: new(apistatus.SignatureVerificationFailure)}, + } { + srv := srv + srv.modifyResp = func(r *apicontainer.AnnounceUsedSpaceResponse) { + r.MetaHeader.Status = &status.Status{Code: testCase.code, Message: "any message"} + } + assertErr := func(err error) { + require.ErrorIs(t, err, apistatus.Error, testCase) + require.ErrorContains(t, err, "any message", testCase) + if testCase.errConst != nil { + require.ErrorIs(t, err, testCase.errConst, testCase) + } + if testCase.errVar != nil { + require.ErrorAs(t, err, testCase.errVar, testCase) + } + } + c, handlerCalled := dial(t, &srv, assertErr) + err := c.SendContainerSizeEstimations(ctx, srv.estimations, SendContainerSizeEstimationsOptions{}) + assertErr(err) + require.True(t, *handlerCalled, testCase) + } + }) + }) + t.Run("response info handler", func(t *testing.T) { + 
t.Run("OK", func(t *testing.T) { + srv := srv + srv.epoch = 3598503 + assertErr := func(err error) { require.NoError(t, err) } + respHandlerCalled := false + c, reqHandlerCalled := _dial(t, &srv, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + respHandlerCalled = true + require.EqualValues(t, 3598503, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return nil + }) + }) + err := c.SendContainerSizeEstimations(ctx, srv.estimations, SendContainerSizeEstimationsOptions{}) + assertErr(err) + require.True(t, respHandlerCalled) + require.True(t, *reqHandlerCalled) + }) + t.Run("fail", func(t *testing.T) { + srv := srv + srv.epoch = 4386380643 + assertErr := func(err error) { require.ErrorContains(t, err, "intercept response info: some handler error") } + respHandlerCalled := false + c, reqHandlerCalled := _dial(t, &srv, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + if !respHandlerCalled { // dial + respHandlerCalled = true + return nil + } + require.EqualValues(t, 4386380643, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return errors.New("some handler error") + }) + }) + err := c.SendContainerSizeEstimations(ctx, srv.estimations, SendContainerSizeEstimationsOptions{}) + assertErr(err) + require.True(t, respHandlerCalled) + require.True(t, *reqHandlerCalled) + }) }) } diff --git a/client/doc.go b/client/doc.go index 21dc30cbc..1cf672f16 100644 --- a/client/doc.go +++ b/client/doc.go @@ -1,7 +1,7 @@ /* Package client provides NeoFS API client implementation. -The main component is Client type. It is a virtual connection to the network +The main component is [Client] type. It is a virtual connection to the network and provides methods for executing operations on the server. 
*/ package client diff --git a/client/errors.go b/client/errors.go index d51cc7fc5..a4c3f5ca5 100644 --- a/client/errors.go +++ b/client/errors.go @@ -13,16 +13,13 @@ var ( // ErrMissingAccount is returned when account/owner is not provided. ErrMissingAccount = errors.New("missing account") - // ErrMissingSigner is returned when signer is not provided. - ErrMissingSigner = errors.New("missing signer") + errMissingSigner = errors.New("missing signer") // ErrMissingEACLContainer is returned when container info is not provided in eACL table. ErrMissingEACLContainer = errors.New("missing container in eACL table") // ErrMissingAnnouncements is returned when announcements are not provided. ErrMissingAnnouncements = errors.New("missing announcements") // ErrZeroRangeLength is returned when range parameter has zero length. ErrZeroRangeLength = errors.New("zero range length") - // ErrMissingRanges is returned when empty ranges list is provided. - ErrMissingRanges = errors.New("missing ranges") // ErrZeroEpoch is returned when zero epoch is provided. ErrZeroEpoch = errors.New("zero epoch") // ErrMissingTrusts is returned when empty slice of trusts is provided. diff --git a/client/example_container_put_test.go b/client/example_container_put_test.go index 65d076c09..0c1c67bd7 100644 --- a/client/example_container_put_test.go +++ b/client/example_container_put_test.go @@ -1,105 +1,89 @@ package client_test -import ( - "context" - "fmt" - "time" - - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - netmapv2 "github.com/nspcc-dev/neofs-api-go/v2/netmap" - "github.com/nspcc-dev/neofs-sdk-go/client" - "github.com/nspcc-dev/neofs-sdk-go/container" - "github.com/nspcc-dev/neofs-sdk-go/container/acl" - cid "github.com/nspcc-dev/neofs-sdk-go/container/id" - "github.com/nspcc-dev/neofs-sdk-go/netmap" - "github.com/nspcc-dev/neofs-sdk-go/user" - "github.com/nspcc-dev/neofs-sdk-go/waiter" -) - -// Put a new container into NeoFS. 
-func ExampleClient_ContainerPut() { - ctx := context.Background() - var accountID user.ID - - // The account was taken from https://github.com/nspcc-dev/neofs-aio - key, err := keys.NEP2Decrypt("6PYM8VdX2BSm7BSXKzV4Fz6S3R9cDLLWNrD9nMjxW352jEv3fsC8N3wNLY", "one", keys.NEP2ScryptParams()) - if err != nil { - panic(err) - } - - signer := user.NewAutoIDSignerRFC6979(key.PrivateKey) - // take account from user's signer - accountID = signer.UserID() - - // prepare client - var prmInit client.PrmInit - - c, err := client.New(prmInit) - if err != nil { - panic(fmt.Errorf("New: %w", err)) - } - - // connect to NeoFS gateway - var prmDial client.PrmDial - prmDial.SetServerURI("grpc://localhost:8080") // endpoint address - prmDial.SetTimeout(15 * time.Second) - prmDial.SetStreamTimeout(15 * time.Second) - - if err = c.Dial(prmDial); err != nil { - panic(fmt.Errorf("dial %v", err)) - } - - // describe new container - cont := container.Container{} - // set version and nonce - cont.Init() - cont.SetOwner(accountID) - cont.SetBasicACL(acl.PublicRW) - - // set reserved attributes - cont.SetName("name-1") - cont.SetCreationTime(time.Now().UTC()) - - // init placement policy - var containerID cid.ID - var placementPolicyV2 netmapv2.PlacementPolicy - var replicas []netmapv2.Replica - - replica := netmapv2.Replica{} - replica.SetCount(1) - replicas = append(replicas, replica) - placementPolicyV2.SetReplicas(replicas) - - var placementPolicy netmap.PlacementPolicy - if err = placementPolicy.ReadFromV2(placementPolicyV2); err != nil { - panic(fmt.Errorf("ReadFromV2 %w", err)) - } - - placementPolicy.SetContainerBackupFactor(1) - cont.SetPlacementPolicy(placementPolicy) - - w := waiter.NewContainerPutWaiter(c, waiter.DefaultPollInterval) - - // waiter creates the container and waits until it will be created or context canceled. 
- containerID, err = w.ContainerPut(ctx, cont, signer, client.PrmContainerPut{}) - if err != nil { - panic(fmt.Errorf("ContainerPut %w", err)) - } - - // containerID already exists - fmt.Println(containerID) - // example output: 76wa5UNiT8gk8Q5rdCVCV4pKuZSmYsifh6g84BcL6Hqs - - contRes, err := c.ContainerGet(ctx, containerID, client.PrmContainerGet{}) - if err != nil { - panic(fmt.Errorf("ContainerGet %w", err)) - } - - jsonData, err := contRes.MarshalJSON() - if err != nil { - panic(fmt.Errorf("MarshalJSON %w", err)) - } - - fmt.Println(string(jsonData)) - // example output: {"version":{"major":2,"minor":13},"ownerID":{"value":"Ne6eoiwn40vQFI/EEI4I906PUEiy8ZXKcw=="},"nonce":"rPVd/iw2RW6Q6d66FVnIqg==","basicACL":532660223,"attributes":[{"key":"Name","value":"name-1"},{"key":"Timestamp","value":"1681738627"}],"placementPolicy":{"replicas":[{"count":1,"selector":""}],"containerBackupFactor":1,"selectors":[],"filters":[],"subnetId":{"value":0}}} -} +// // Put a new container into NeoFS. +// func ExampleClient_ContainerPut() { +// ctx := context.Background() +// var accountID user.ID +// +// // The account was taken from https://github.com/nspcc-dev/neofs-aio +// key, err := keys.NEP2Decrypt("6PYM8VdX2BSm7BSXKzV4Fz6S3R9cDLLWNrD9nMjxW352jEv3fsC8N3wNLY", "one", keys.NEP2ScryptParams()) +// if err != nil { +// panic(err) +// } +// +// signer := user.NewAutoIDSignerRFC6979(key.PrivateKey) +// // take account from user's signer +// accountID = signer.UserID() +// +// // prepare client +// var prmInit client.PrmInit +// +// c, err := client.New(prmInit) +// if err != nil { +// panic(fmt.Errorf("New: %w", err)) +// } +// +// // connect to NeoFS gateway +// var prmDial client.PrmDial +// prmDial.SetServerURI("grpc://localhost:8080") // endpoint address +// prmDial.SetTimeout(15 * time.Second) +// prmDial.SetStreamTimeout(15 * time.Second) +// +// if err = c.Dial(prmDial); err != nil { +// panic(fmt.Errorf("dial %v", err)) +// } +// +// // describe new container +// cont := 
container.Container{} +// // set version and nonce +// cont.Init() +// cont.SetOwner(accountID) +// cont.SetBasicACL(acl.PublicRW) +// +// // set reserved attributes +// cont.SetName("name-1") +// cont.SetCreationTime(time.Now().UTC()) +// +// // init placement policy +// var containerID cid.ID +// var placementPolicyV2 netmapv2.PlacementPolicy +// var replicas []netmapv2.Replica +// +// replica := netmapv2.Replica{} +// replica.SetCount(1) +// replicas = append(replicas, replica) +// placementPolicyV2.SetReplicas(replicas) +// +// var placementPolicy netmap.PlacementPolicy +// if err = placementPolicy.ReadFromV2(placementPolicyV2); err != nil { +// panic(fmt.Errorf("ReadFromV2 %w", err)) +// } +// +// placementPolicy.SetContainerBackupFactor(1) +// cont.SetPlacementPolicy(placementPolicy) +// +// w := waiter.NewContainerPutWaiter(c, waiter.DefaultPollInterval) +// +// // waiter creates the container and waits until it will be created or context canceled. +// containerID, err = w.ContainerPut(ctx, cont, signer, client.PrmContainerPut{}) +// if err != nil { +// panic(fmt.Errorf("ContainerPut %w", err)) +// } +// +// // containerID already exists +// fmt.Println(containerID) +// // example output: 76wa5UNiT8gk8Q5rdCVCV4pKuZSmYsifh6g84BcL6Hqs +// +// contRes, err := c.ContainerGet(ctx, containerID, client.PrmContainerGet{}) +// if err != nil { +// panic(fmt.Errorf("ContainerGet %w", err)) +// } +// +// jsonData, err := contRes.MarshalJSON() +// if err != nil { +// panic(fmt.Errorf("MarshalJSON %w", err)) +// } +// +// fmt.Println(string(jsonData)) +// // example output: {"version":{"major":2,"minor":13},"ownerID":{"value":"Ne6eoiwn40vQFI/EEI4I906PUEiy8ZXKcw=="},"nonce":"rPVd/iw2RW6Q6d66FVnIqg==","basicACL":532660223,"attributes":[{"key":"Name","value":"name-1"},{"key":"Timestamp","value":"1681738627"}],"placementPolicy":{"replicas":[{"count":1,"selector":""}],"containerBackupFactor":1,"selectors":[],"filters":[],"subnetId":{"value":0}}} +// } diff --git 
a/client/example_test.go b/client/example_test.go index 606518206..7aabf75b0 100644 --- a/client/example_test.go +++ b/client/example_test.go @@ -1,165 +1,148 @@ package client_test -import ( - "context" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "time" - - "github.com/google/uuid" - rpcClient "github.com/nspcc-dev/neofs-api-go/v2/rpc/client" - "github.com/nspcc-dev/neofs-api-go/v2/rpc/common" - "github.com/nspcc-dev/neofs-api-go/v2/rpc/grpc" - "github.com/nspcc-dev/neofs-sdk-go/client" - cid "github.com/nspcc-dev/neofs-sdk-go/container/id" - neofsecdsa "github.com/nspcc-dev/neofs-sdk-go/crypto/ecdsa" - "github.com/nspcc-dev/neofs-sdk-go/session" - "github.com/nspcc-dev/neofs-sdk-go/user" -) - -func ExampleClient_createInstance() { - // Create client instance - var prm client.PrmInit - c, err := client.New(prm) - _ = err - - // Connect to the NeoFS server - var prmDial client.PrmDial - prmDial.SetServerURI("grpc://localhost:8080") // endpoint address - prmDial.SetTimeout(15 * time.Second) - prmDial.SetStreamTimeout(15 * time.Second) - - _ = c.Dial(prmDial) -} - -type CustomRPCRequest struct { -} - -type CustomRPCResponse struct { -} - -func (a *CustomRPCRequest) ToGRPCMessage() grpc.Message { - return nil -} - -func (a *CustomRPCRequest) FromGRPCMessage(grpc.Message) error { - return nil -} - -func (a *CustomRPCResponse) ToGRPCMessage() grpc.Message { - return nil -} - -func (a *CustomRPCResponse) FromGRPCMessage(grpc.Message) error { - return nil -} - -// Consume custom service of the server. -func Example_customService() { - // syntax = "proto3"; - // - // service CustomService { - // rpc CustomRPC(CustomRPCRequest) returns (CustomRPCResponse); - // } - - // import "github.com/nspcc-dev/neofs-api-go/v2/rpc/client" - // import "github.com/nspcc-dev/neofs-api-go/v2/rpc/common" - - var prmInit client.PrmInit - // ... 
- - c, _ := client.New(prmInit) - - req := &CustomRPCRequest{} - resp := &CustomRPCResponse{} - - err := c.ExecRaw(func(c *rpcClient.Client) error { - return rpcClient.SendUnary(c, common.CallMethodInfo{ - Service: "CustomService", - Name: "CustomRPC", - }, req, resp) - }) - - _ = err - - // ... - - // Close the connection - _ = c.Close() - - // Note that it's not allowed to override Client behaviour directly: the parameters - // for the all operations are write-only and the results of the all operations are - // read-only. To be able to override client behavior (e.g. for tests), abstract it - // with an interface: - // - // import "github.com/nspcc-dev/neofs-sdk-go/client" - // - // type NeoFSClient interface { - // // Operations according to the application needs - // CreateContainer(context.Context, container.Container) error - // // ... - // } - // - // type client struct { - // c *client.Client - // } - // - // func (x *client) CreateContainer(context.Context, container.Container) error { - // // ... - // } -} - -// Session created for the one node, and it will work only for this node. Other nodes don't have info about this session. -// That is why session can't be created with Pool API. -func ExampleClient_SessionCreate() { - // import "github.com/google/uuid" - - var prmInit client.PrmInit - // ... - c, _ := client.New(prmInit) - - // Epoch when session will expire. - // Note that expiration starts since exp+1 epoch. - // For instance, now you have 8 epoch. You set exp=10. The session will be still valid during 10th epoch. - // Expiration starts since 11 epoch. - var exp uint64 - var prm client.PrmSessionCreate - prm.SetExp(exp) - - // The key is generated to simplify the example, in reality it's likely to come from configuration/wallet. 
- pk, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - signer := user.NewAutoIDSignerRFC6979(*pk) - - res, _ := c.SessionCreate(context.Background(), signer, prm) - - var id uuid.UUID - _ = id.UnmarshalBinary(res.ID()) - - // Public key for separate private key, which was created inside node for this session. - var key neofsecdsa.PublicKey - _ = key.Decode(res.PublicKey()) - - // Fill session parameters - var sessionObject session.Object - sessionObject.SetID(id) - sessionObject.SetAuthKey(&key) - sessionObject.SetExp(exp) - - // Attach verb and container. Session allows to do just one action by time. In this example it is a VerbObjectPut. - // If you need Get, Delete, etc you should create another session. - sessionObject.ForVerb(session.VerbObjectPut) - // Session works only with one container. - sessionObject.BindContainer(cid.ID{}) - - // Finally, token must be signed by container owner or someone who allowed to do the Verb action. In our example - // it is VerbObjectPut. - _ = sessionObject.Sign(signer) - - // ... - - // This token will be used in object put operation - var prmPut client.PrmObjectPutInit - prmPut.WithinSession(sessionObject) - // ... 
-} +// +// func ExampleClient_createInstance() { +// // Create client instance +// var prm client.PrmInit +// c, err := client.New(prm) +// _ = err +// +// // Connect to the NeoFS server +// var prmDial client.PrmDial +// prmDial.SetServerURI("grpc://localhost:8080") // endpoint address +// prmDial.SetTimeout(15 * time.Second) +// prmDial.SetStreamTimeout(15 * time.Second) +// +// _ = c.Dial(prmDial) +// } +// +// type CustomRPCRequest struct { +// } +// +// type CustomRPCResponse struct { +// } +// +// func (a *CustomRPCRequest) ToGRPCMessage() grpc.Message { +// return nil +// } +// +// func (a *CustomRPCRequest) FromGRPCMessage(grpc.Message) error { +// return nil +// } +// +// func (a *CustomRPCResponse) ToGRPCMessage() grpc.Message { +// return nil +// } +// +// func (a *CustomRPCResponse) FromGRPCMessage(grpc.Message) error { +// return nil +// } +// +// // Consume custom service of the server. +// func Example_customService() { +// // syntax = "proto3"; +// // +// // service CustomService { +// // rpc CustomRPC(CustomRPCRequest) returns (CustomRPCResponse); +// // } +// +// // import "github.com/nspcc-dev/neofs-api-go/v2/rpc/client" +// // import "github.com/nspcc-dev/neofs-api-go/v2/rpc/common" +// +// var prmInit client.PrmInit +// // ... +// +// c, _ := client.New(prmInit) +// +// req := &CustomRPCRequest{} +// resp := &CustomRPCResponse{} +// +// err := c.ExecRaw(func(c *rpcClient.Client) error { +// return rpcClient.SendUnary(c, common.CallMethodInfo{ +// Service: "CustomService", +// Name: "CustomRPC", +// }, req, resp) +// }) +// +// _ = err +// +// // ... +// +// // Close the connection +// _ = c.Close() +// +// // Note that it's not allowed to override Client behaviour directly: the parameters +// // for the all operations are write-only and the results of the all operations are +// // read-only. To be able to override client behavior (e.g. 
for tests), abstract it +// // with an interface: +// // +// // import "github.com/nspcc-dev/neofs-sdk-go/client" +// // +// // type NeoFSClient interface { +// // // Operations according to the application needs +// // CreateContainer(context.Context, container.Container) error +// // // ... +// // } +// // +// // type client struct { +// // c *client.Client +// // } +// // +// // func (x *client) CreateContainer(context.Context, container.Container) error { +// // // ... +// // } +// } +// +// // Session created for the one node, and it will work only for this node. Other nodes don't have info about this session. +// // That is why session can't be created with Pool API. +// func ExampleClient_SessionCreate() { +// // import "github.com/google/uuid" +// +// var prmInit client.PrmInit +// // ... +// c, _ := client.New(prmInit) +// +// // Epoch when session will expire. +// // Note that expiration starts since exp+1 epoch. +// // For instance, now you have 8 epoch. You set exp=10. The session will be still valid during 10th epoch. +// // Expiration starts since 11 epoch. +// var exp uint64 +// var prm client.PrmSessionCreate +// prm.SetExp(exp) +// +// // The key is generated to simplify the example, in reality it's likely to come from configuration/wallet. +// pk, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) +// signer := user.NewAutoIDSignerRFC6979(*pk) +// +// res, _ := c.SessionCreate(context.Background(), signer, prm) +// +// var id uuid.UUID +// _ = id.UnmarshalBinary(res.ID()) +// +// // Public key for separate private key, which was created inside node for this session. +// var key neofsecdsa.PublicKey +// _ = key.Decode(res.PublicKey()) +// +// // Fill session parameters +// var sessionObject session.Object +// sessionObject.SetID(id) +// sessionObject.SetAuthKey(&key) +// sessionObject.SetExp(exp) +// +// // Attach verb and container. Session allows to do just one action by time. In this example it is a VerbObjectPut. 
+// // If you need Get, Delete, etc you should create another session. +// sessionObject.ForVerb(session.VerbObjectPut) +// // Session works only with one container. +// sessionObject.BindContainer(cid.ID{}) +// +// // Finally, token must be signed by container owner or someone who allowed to do the Verb action. In our example +// // it is VerbObjectPut. +// _ = sessionObject.Sign(signer) +// +// // ... +// +// // This token will be used in object put operation +// var prmPut client.PrmObjectPutInit +// prmPut.WithinSession(sessionObject) +// // ... +// } diff --git a/client/netmap.go b/client/netmap.go index 672dc86c5..5c0238461 100644 --- a/client/netmap.go +++ b/client/netmap.go @@ -2,258 +2,254 @@ package client import ( "context" + "errors" "fmt" + "time" - v2netmap "github.com/nspcc-dev/neofs-api-go/v2/netmap" - rpcapi "github.com/nspcc-dev/neofs-api-go/v2/rpc" - "github.com/nspcc-dev/neofs-api-go/v2/rpc/client" - v2session "github.com/nspcc-dev/neofs-api-go/v2/session" + apinetmap "github.com/nspcc-dev/neofs-sdk-go/api/netmap" + apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" + neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" "github.com/nspcc-dev/neofs-sdk-go/netmap" "github.com/nspcc-dev/neofs-sdk-go/stat" "github.com/nspcc-dev/neofs-sdk-go/version" ) -var ( - // special variables for test purposes only, to overwrite real RPC calls. - rpcAPINetworkInfo = rpcapi.NetworkInfo - rpcAPILocalNodeInfo = rpcapi.LocalNodeInfo -) - -// NetworkInfoExecutor describes methods to get network information. -type NetworkInfoExecutor interface { - NetworkInfo(ctx context.Context, prm PrmNetworkInfo) (netmap.NetworkInfo, error) -} - -// PrmEndpointInfo groups parameters of EndpointInfo operation. -type PrmEndpointInfo struct { - prmCommonMeta -} - -// ResEndpointInfo group resulting values of EndpointInfo operation. 
-type ResEndpointInfo struct { - version version.Version - - ni netmap.NodeInfo -} +// GetEndpointInfoOptions groups optional parameters of [Client.GetEndpointInfo]. +type GetEndpointInfoOptions struct{} -// NewResEndpointInfo is a constructor for ResEndpointInfo. -func NewResEndpointInfo(version version.Version, ni netmap.NodeInfo) ResEndpointInfo { - return ResEndpointInfo{ - version: version, - ni: ni, - } -} - -// LatestVersion returns latest NeoFS API protocol's version in use. -func (x ResEndpointInfo) LatestVersion() version.Version { - return x.version -} - -// NodeInfo returns information about the NeoFS node served on the remote endpoint. -func (x ResEndpointInfo) NodeInfo() netmap.NodeInfo { - return x.ni +// EndpointInfo is a result of [Client.GetEndpointInfo] operation. +type EndpointInfo struct { + // The latest NeoFS API protocol's version in use. + LatestVersion version.Version + // Information about the NeoFS node served on the remote endpoint. + Node netmap.NodeInfo } -// EndpointInfo requests information about the storage node served on the remote endpoint. -// -// Method can be used as a health check to see if node is alive and responds to requests. -// -// Any client's internal or transport errors are returned as `error`, -// see [apistatus] package for NeoFS-specific error types. -// -// Context is required and must not be nil. It is used for network communication. -// -// Exactly one return value is non-nil. Server status return is returned in ResEndpointInfo. -// Reflects all internal errors in second return value (transport problems, response processing, etc.). -func (c *Client) EndpointInfo(ctx context.Context, prm PrmEndpointInfo) (*ResEndpointInfo, error) { +// GetEndpointInfo requests information about the storage node served on the +// remote endpoint. GetEndpointInfo can be used as a health check to see if node +// is alive and responds to requests. 
+func (c *Client) GetEndpointInfo(ctx context.Context, _ GetEndpointInfoOptions) (EndpointInfo, error) { + var res EndpointInfo var err error - defer func() { - c.sendStatistic(stat.MethodEndpointInfo, err)() - }() + if c.serverPubKey != nil && c.handleAPIOpResult != nil { + // serverPubKey can be nil since it's initialized using EndpointInfo itself on dial + defer func(start time.Time) { + c.handleAPIOpResult(c.serverPubKey, c.endpoint, stat.MethodEndpointInfo, time.Since(start), err) + }(time.Now()) + } // form request - var req v2netmap.LocalNodeInfoRequest - - // init call context - - var ( - cc contextCall - res ResEndpointInfo - ) - - c.initCallContext(&cc) - cc.meta = prm.prmCommonMeta - cc.req = &req - cc.call = func() (responseV2, error) { - return rpcAPILocalNodeInfo(&c.c, &req, client.WithContext(ctx)) + req := new(apinetmap.LocalNodeInfoRequest) + // FIXME: balance requests need small fixed-size buffers for encoding, its makes + // no sense to mosh them with other buffers + buf := c.signBuffers.Get().(*[]byte) + defer c.signBuffers.Put(buf) + if req.VerifyHeader, err = neofscrypto.SignRequest(c.signer, req, req.Body, *buf); err != nil { + err = fmt.Errorf("%s: %w", errSignRequest, err) // for closure above + return res, err } - cc.result = func(r responseV2) { - resp := r.(*v2netmap.LocalNodeInfoResponse) - - body := resp.GetBody() - - const fieldVersion = "version" - - verV2 := body.GetVersion() - if verV2 == nil { - cc.err = newErrMissingResponseField(fieldVersion) - return - } - - cc.err = res.version.ReadFromV2(*verV2) - if cc.err != nil { - cc.err = newErrInvalidResponseField(fieldVersion, cc.err) - return - } - - const fieldNodeInfo = "node info" - nodeInfoV2 := body.GetNodeInfo() - if nodeInfoV2 == nil { - cc.err = newErrMissingResponseField(fieldNodeInfo) - return - } + // send request + resp, err := c.transport.netmap.LocalNodeInfo(ctx, req) + if err != nil { + err = fmt.Errorf("%s: %w", errTransport, err) // for closure above + return res, err 
+ } - cc.err = res.ni.ReadFromV2(*nodeInfoV2) - if cc.err != nil { - cc.err = newErrInvalidResponseField(fieldNodeInfo, cc.err) - return + // intercept response info + if c.interceptAPIRespInfo != nil { + if err = c.interceptAPIRespInfo(ResponseMetaInfo{ + key: resp.GetVerifyHeader().GetBodySignature().GetKey(), + epoch: resp.GetMetaHeader().GetEpoch(), + }); err != nil { + err = fmt.Errorf("%s: %w", errInterceptResponseInfo, err) // for closure above + return res, err } } - // process call - if !cc.processCall() { - err = cc.err - return nil, err + // verify response integrity + if err = neofscrypto.VerifyResponse(resp, resp.Body); err != nil { + err = fmt.Errorf("%s: %w", errResponseSignature, err) // for closure above + return res, err + } + sts, err := apistatus.ErrorFromV2(resp.GetMetaHeader().GetStatus()) + if err != nil { + err = fmt.Errorf("%s: %w", errInvalidResponseStatus, err) // for closure above + return res, err + } + if sts != nil { + err = sts // for closure above + return res, err } - return &res, nil + // decode response payload + if resp.Body == nil { + err = errors.New(errMissingResponseBody) // for closure above + return res, err + } + const fieldVersion = "version" + if resp.Body.Version == nil { + err = fmt.Errorf("%s (%s)", errMissingResponseBodyField, fieldVersion) // for closure above + return res, err + } + if err = res.LatestVersion.ReadFromV2(resp.Body.Version); err != nil { + err = fmt.Errorf("%s (%s)", errInvalidResponseBodyField, fieldVersion) // for closure above + return res, err + } + const fieldNodeInfo = "node info" + if resp.Body.NodeInfo == nil { + err = fmt.Errorf("%s (%s)", errMissingResponseBodyField, fieldNodeInfo) // for closure above + return res, err + } else if err = res.Node.ReadFromV2(resp.Body.NodeInfo); err != nil { + err = fmt.Errorf("%s (%s): %w", errInvalidResponseBodyField, fieldNodeInfo, err) // for closure above + return res, err + } + return res, nil } -// PrmNetworkInfo groups parameters of NetworkInfo 
operation. -type PrmNetworkInfo struct { - prmCommonMeta -} +// GetNetworkInfoOptions groups optional parameters of [Client.GetNetworkInfo]. +type GetNetworkInfoOptions struct{} -// NetworkInfo requests information about the NeoFS network of which the remote server is a part. -// -// Any client's internal or transport errors are returned as `error`, -// see [apistatus] package for NeoFS-specific error types. -// -// Context is required and must not be nil. It is used for network communication. -// -// Reflects all internal errors in second return value (transport problems, response processing, etc.). -func (c *Client) NetworkInfo(ctx context.Context, prm PrmNetworkInfo) (netmap.NetworkInfo, error) { +// GetNetworkInfo requests information about the NeoFS network of which the remote +// server is a part. +func (c *Client) GetNetworkInfo(ctx context.Context, _ GetNetworkInfoOptions) (netmap.NetworkInfo, error) { + var res netmap.NetworkInfo var err error - defer func() { - c.sendStatistic(stat.MethodNetworkInfo, err)() - }() + if c.handleAPIOpResult != nil { + defer func(start time.Time) { + c.handleAPIOpResult(c.serverPubKey, c.endpoint, stat.MethodNetworkInfo, time.Since(start), err) + }(time.Now()) + } // form request - var req v2netmap.NetworkInfoRequest - - // init call context - - var ( - cc contextCall - res netmap.NetworkInfo - ) - - c.initCallContext(&cc) - cc.meta = prm.prmCommonMeta - cc.req = &req - cc.call = func() (responseV2, error) { - return rpcAPINetworkInfo(&c.c, &req, client.WithContext(ctx)) + req := new(apinetmap.NetworkInfoRequest) + // FIXME: balance requests need small fixed-size buffers for encoding, its makes + // no sense to mosh them with other buffers + buf := c.signBuffers.Get().(*[]byte) + defer c.signBuffers.Put(buf) + if req.VerifyHeader, err = neofscrypto.SignRequest(c.signer, req, req.Body, *buf); err != nil { + err = fmt.Errorf("%s: %w", errSignRequest, err) // for closure above + return res, err } - cc.result = func(r responseV2) 
{ - resp := r.(*v2netmap.NetworkInfoResponse) - const fieldNetInfo = "network info" - - netInfoV2 := resp.GetBody().GetNetworkInfo() - if netInfoV2 == nil { - cc.err = newErrMissingResponseField(fieldNetInfo) - return - } + // send request + resp, err := c.transport.netmap.NetworkInfo(ctx, req) + if err != nil { + err = fmt.Errorf("%s: %w", errTransport, err) // for closure above + return res, err + } - cc.err = res.ReadFromV2(*netInfoV2) - if cc.err != nil { - cc.err = newErrInvalidResponseField(fieldNetInfo, cc.err) - return + // intercept response info + if c.interceptAPIRespInfo != nil { + if err = c.interceptAPIRespInfo(ResponseMetaInfo{ + key: resp.GetVerifyHeader().GetBodySignature().GetKey(), + epoch: resp.GetMetaHeader().GetEpoch(), + }); err != nil { + err = fmt.Errorf("%s: %w", errInterceptResponseInfo, err) // for closure above + return res, err } } - // process call - if !cc.processCall() { - err = cc.err - return netmap.NetworkInfo{}, cc.err + // verify response integrity + if err = neofscrypto.VerifyResponse(resp, resp.Body); err != nil { + err = fmt.Errorf("%s: %w", errResponseSignature, err) // for closure above + return res, err + } + sts, err := apistatus.ErrorFromV2(resp.GetMetaHeader().GetStatus()) + if err != nil { + err = fmt.Errorf("%s: %w", errInvalidResponseStatus, err) // for closure above + return res, err + } + if sts != nil { + err = sts // for closure above + return res, err } + // decode response payload + if resp.Body == nil { + err = errors.New(errMissingResponseBody) // for closure above + return res, err + } + const fieldNetworkInfo = "network info" + if resp.Body.NetworkInfo == nil { + err = fmt.Errorf("%s (%s)", errMissingResponseBodyField, fieldNetworkInfo) // for closure above + return res, err + } else if err = res.ReadFromV2(resp.Body.NetworkInfo); err != nil { + err = fmt.Errorf("%s (%s): %w", errInvalidResponseBodyField, fieldNetworkInfo, err) // for closure above + return res, err + } return res, nil } -// 
PrmNetMapSnapshot groups parameters of NetMapSnapshot operation. -type PrmNetMapSnapshot struct { -} +// GetCurrentNetmapOptions groups optional parameters of [Client.GetCurrentNetmap] operation. +type GetCurrentNetmapOptions struct{} -// NetMapSnapshot requests current network view of the remote server. -// -// Any client's internal or transport errors are returned as `error`, -// see [apistatus] package for NeoFS-specific error types. -// -// Context is required and MUST NOT be nil. It is used for network communication. -// -// Reflects all internal errors in second return value (transport problems, response processing, etc.). -func (c *Client) NetMapSnapshot(ctx context.Context, _ PrmNetMapSnapshot) (netmap.NetMap, error) { +// GetCurrentNetmap requests current network map from the remote server. +func (c *Client) GetCurrentNetmap(ctx context.Context, _ GetCurrentNetmapOptions) (netmap.NetMap, error) { + var res netmap.NetMap var err error - defer func() { - c.sendStatistic(stat.MethodNetMapSnapshot, err)() - }() - - // form request body - var body v2netmap.SnapshotRequestBody - - // form meta header - var meta v2session.RequestMetaHeader + if c.handleAPIOpResult != nil { + defer func(start time.Time) { + c.handleAPIOpResult(c.serverPubKey, c.endpoint, stat.MethodNetMapSnapshot, time.Since(start), err) + }(time.Now()) + } // form request - var req v2netmap.SnapshotRequest - req.SetBody(&body) - c.prepareRequest(&req, &meta) - - buf := c.buffers.Get().(*[]byte) - err = signServiceMessage(c.prm.signer, &req, *buf) - c.buffers.Put(buf) - if err != nil { - err = fmt.Errorf("sign request: %w", err) - return netmap.NetMap{}, err + req := new(apinetmap.NetmapSnapshotRequest) + // FIXME: balance requests need small fixed-size buffers for encoding, its makes + // no sense to mosh them with other buffers + buf := c.signBuffers.Get().(*[]byte) + defer c.signBuffers.Put(buf) + if req.VerifyHeader, err = neofscrypto.SignRequest(c.signer, req, req.Body, *buf); err != nil { + 
err = fmt.Errorf("%s: %w", errSignRequest, err) // for closure above + return res, err } - var resp *v2netmap.SnapshotResponse - - resp, err = c.server.netMapSnapshot(ctx, req) + // send request + resp, err := c.transport.netmap.NetmapSnapshot(ctx, req) if err != nil { - return netmap.NetMap{}, err + err = fmt.Errorf("%s: %w", errTransport, err) // for closure above + return res, err } - var res netmap.NetMap - if err = c.processResponse(resp); err != nil { - return netmap.NetMap{}, err + // intercept response info + if c.interceptAPIRespInfo != nil { + if err = c.interceptAPIRespInfo(ResponseMetaInfo{ + key: resp.GetVerifyHeader().GetBodySignature().GetKey(), + epoch: resp.GetMetaHeader().GetEpoch(), + }); err != nil { + err = fmt.Errorf("%s: %w", errInterceptResponseInfo, err) // for closure above + return res, err + } } - const fieldNetMap = "network map" - - netMapV2 := resp.GetBody().NetMap() - if netMapV2 == nil { - err = newErrMissingResponseField(fieldNetMap) - return netmap.NetMap{}, err + // verify response integrity + if err = neofscrypto.VerifyResponse(resp, resp.Body); err != nil { + err = fmt.Errorf("%s: %w", errResponseSignature, err) // for closure above + return res, err } - - err = res.ReadFromV2(*netMapV2) + sts, err := apistatus.ErrorFromV2(resp.GetMetaHeader().GetStatus()) if err != nil { - err = newErrInvalidResponseField(fieldNetMap, err) - return netmap.NetMap{}, err + err = fmt.Errorf("%s: %w", errInvalidResponseStatus, err) // for closure above + return res, err + } + if sts != nil { + err = sts // for closure above + return res, err } + // decode response payload + if resp.Body == nil { + err = errors.New(errMissingResponseBody) // for closure above + return res, err + } + const fieldNetmap = "network map" + if resp.Body.Netmap == nil { + err = fmt.Errorf("%s (%s)", errMissingResponseBodyField, fieldNetmap) // for closure above + return res, err + } else if err = res.ReadFromV2(resp.Body.Netmap); err != nil { + err = fmt.Errorf("%s (%s): 
%w", errInvalidResponseBodyField, fieldNetmap, err) // for closure above + return res, err + } return res, nil } diff --git a/client/netmap_test.go b/client/netmap_test.go index f2c644268..70e3d65d2 100644 --- a/client/netmap_test.go +++ b/client/netmap_test.go @@ -1,143 +1,1337 @@ package client import ( + "bytes" "context" "errors" "fmt" + "net" "testing" + "time" - v2netmap "github.com/nspcc-dev/neofs-api-go/v2/netmap" - "github.com/nspcc-dev/neofs-api-go/v2/rpc/client" - "github.com/nspcc-dev/neofs-api-go/v2/session" + apinetmap "github.com/nspcc-dev/neofs-sdk-go/api/netmap" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + apisession "github.com/nspcc-dev/neofs-sdk-go/api/session" + "github.com/nspcc-dev/neofs-sdk-go/api/status" apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" - "github.com/nspcc-dev/neofs-sdk-go/crypto/test" + neofscryptotest "github.com/nspcc-dev/neofs-sdk-go/crypto/test" "github.com/nspcc-dev/neofs-sdk-go/netmap" + netmaptest "github.com/nspcc-dev/neofs-sdk-go/netmap/test" + "github.com/nspcc-dev/neofs-sdk-go/stat" + "github.com/nspcc-dev/neofs-sdk-go/version" + versiontest "github.com/nspcc-dev/neofs-sdk-go/version/test" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/test/bufconn" ) -type serverNetMap struct { - errTransport error +type noOtherNetmapCalls struct{} - signResponse bool - - statusOK bool +func (noOtherNetmapCalls) LocalNodeInfo(context.Context, *apinetmap.LocalNodeInfoRequest) (*apinetmap.LocalNodeInfoResponse, error) { + panic("must not be called") +} - setNetMap bool - netMap v2netmap.NetMap +func (noOtherNetmapCalls) NetworkInfo(context.Context, *apinetmap.NetworkInfoRequest) (*apinetmap.NetworkInfoResponse, error) { + panic("must not be called") +} - signer neofscrypto.Signer +func (noOtherNetmapCalls) NetmapSnapshot(context.Context, *apinetmap.NetmapSnapshotRequest) 
(*apinetmap.NetmapSnapshotResponse, error) { + panic("must not be called") } -func (x *serverNetMap) createSession(*client.Client, *session.CreateRequest, ...client.CallOption) (*session.CreateResponse, error) { - return nil, nil +// implements [apinetmap.NetmapServiceServer] with simplified LocalNodeInfo to +// be used on [Client.Dial] while testing other methods. +type endpointInfoOnDialServer struct { + noOtherNetmapCalls + epoch uint64 + serverSigner neofscrypto.Signer + latestVersion version.Version + nodeInfo netmap.NodeInfo } -func (x *serverNetMap) netMapSnapshot(_ context.Context, req v2netmap.SnapshotRequest) (*v2netmap.SnapshotResponse, error) { - err := verifyServiceMessage(&req) +func (x endpointInfoOnDialServer) LocalNodeInfo(_ context.Context, _ *apinetmap.LocalNodeInfoRequest) (*apinetmap.LocalNodeInfoResponse, error) { + resp := apinetmap.LocalNodeInfoResponse{ + Body: &apinetmap.LocalNodeInfoResponse_Body{ + Version: new(refs.Version), + NodeInfo: new(apinetmap.NodeInfo), + }, + MetaHeader: &apisession.ResponseMetaHeader{ + Epoch: x.epoch, + }, + } + x.latestVersion.WriteToV2(resp.Body.Version) + x.nodeInfo.WriteToV2(resp.Body.NodeInfo) + var err error + resp.VerifyHeader, err = neofscrypto.SignResponse(x.serverSigner, &resp, resp.Body, nil) if err != nil { - return nil, err + return nil, fmt.Errorf("sign response: %w", err) } + return &resp, nil +} +type endpointInfoServer struct { + // client + clientSigScheme neofscrypto.Scheme + clientPubKey []byte + // server + called bool // to distinguish on-dial call + sleepDur time.Duration + endpointInfoOnDialServer + errTransport error + modifyResp func(*apinetmap.LocalNodeInfoResponse) + corruptRespSig func(*apinetmap.LocalNodeInfoResponse) +} + +func (x *endpointInfoServer) LocalNodeInfo(ctx context.Context, req *apinetmap.LocalNodeInfoRequest) (*apinetmap.LocalNodeInfoResponse, error) { + defer func() { x.called = true }() + if !x.called { + return x.endpointInfoOnDialServer.LocalNodeInfo(ctx, req) + 
} + if x.sleepDur > 0 { + time.Sleep(x.sleepDur) + } if x.errTransport != nil { return nil, x.errTransport } - - var body v2netmap.SnapshotResponseBody - - if x.setNetMap { - body.SetNetMap(&x.netMap) + var sts status.Status + resp := apinetmap.LocalNodeInfoResponse{ + MetaHeader: &apisession.ResponseMetaHeader{Status: &sts, Epoch: x.epoch}, } - - var meta session.ResponseMetaHeader - - if !x.statusOK { - meta.SetStatus(statusErr.ErrorToV2()) - } - - var resp v2netmap.SnapshotResponse - resp.SetBody(&body) - resp.SetMetaHeader(&meta) - - if x.signResponse { - err = signServiceMessage(x.signer, &resp, nil) - if err != nil { - panic(fmt.Sprintf("sign response: %v", err)) + var err error + if ctx == nil { + sts.Code, sts.Message = status.InternalServerError, "nil context" + } else if req == nil { + sts.Code, sts.Message = status.InternalServerError, "nil request" + } else if err = neofscrypto.VerifyRequest(req, req.Body); err != nil { + sts.Code, sts.Message = status.SignatureVerificationFail, err.Error() + } else if req.VerifyHeader.BodySignature.Scheme != refs.SignatureScheme(x.clientSigScheme) || + !bytes.Equal(req.VerifyHeader.BodySignature.Key, x.clientPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request body signature credentials" + } else if req.VerifyHeader.MetaSignature.Scheme != refs.SignatureScheme(x.clientSigScheme) || + !bytes.Equal(req.VerifyHeader.MetaSignature.Key, x.clientPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request meta header signature credentials" + } else if req.VerifyHeader.OriginSignature.Scheme != refs.SignatureScheme(x.clientSigScheme) || + !bytes.Equal(req.VerifyHeader.OriginSignature.Key, x.clientPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected origin request verification header signature credentials" + } else if req.MetaHeader != nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: meta header is 
set" + } else { + resp.MetaHeader.Status = nil + resp.Body = &apinetmap.LocalNodeInfoResponse_Body{ + Version: new(refs.Version), + NodeInfo: new(apinetmap.NodeInfo), } + x.latestVersion.WriteToV2(resp.Body.Version) + x.nodeInfo.WriteToV2(resp.Body.NodeInfo) + } + if x.modifyResp != nil { + x.modifyResp(&resp) + } + resp.VerifyHeader, err = neofscrypto.SignResponse(x.serverSigner, &resp, resp.Body, nil) + if err != nil { + return nil, fmt.Errorf("sign response: %w", err) + } + if x.corruptRespSig != nil { + x.corruptRespSig(&resp) } - return &resp, nil } -func TestClient_NetMapSnapshot(t *testing.T) { - var err error - var prm PrmNetMapSnapshot - var res netmap.NetMap - var srv serverNetMap - - signer := test.RandomSignerRFC6979(t) - - srv.signer = signer - - c := newClient(t, &srv) +func TestClient_GetEndpointInfo(t *testing.T) { ctx := context.Background() + var srv endpointInfoServer + srv.sleepDur = 10 * time.Millisecond + srv.serverSigner = neofscryptotest.RandomSigner() + srv.latestVersion = versiontest.Version() + srv.nodeInfo = netmaptest.NodeInfo() + srv.nodeInfo.SetPublicKey(neofscrypto.PublicKeyBytes(srv.serverSigner.Public())) + _dial := func(t testing.TB, srv *endpointInfoServer, assertErr func(error), customizeOpts func(*Options)) (*Client, *bool) { + var opts Options + var handlerCalled bool + opts.SetAPIRequestResultHandler(func(nodeKey []byte, endpoint string, op stat.Method, dur time.Duration, err error) { + handlerCalled = true + require.Equal(t, srv.nodeInfo.PublicKey(), nodeKey) + require.Equal(t, "localhost:8080", endpoint) + require.Equal(t, stat.MethodEndpointInfo, op) + require.Greater(t, dur, srv.sleepDur) + assertErr(err) + }) + if customizeOpts != nil { + customizeOpts(&opts) + } - // request signature - srv.errTransport = errors.New("any error") - - _, err = c.NetMapSnapshot(ctx, prm) - require.ErrorIs(t, err, srv.errTransport) + c, err := New(anyValidURI, opts) + require.NoError(t, err) + srv.clientSigScheme = c.signer.Scheme() + 
srv.clientPubKey = neofscrypto.PublicKeyBytes(c.signer.Public()) - srv.errTransport = nil + conn := bufconn.Listen(10 << 10) + gs := grpc.NewServer() + apinetmap.RegisterNetmapServiceServer(gs, srv) + go func() { _ = gs.Serve(conn) }() + t.Cleanup(gs.Stop) - // unsigned response - _, err = c.NetMapSnapshot(ctx, prm) - require.Error(t, err) + c.dial = func(ctx context.Context, _ string) (net.Conn, error) { return conn.DialContext(ctx) } + require.NoError(t, c.Dial(ctx)) - srv.signResponse = true + return c, &handlerCalled + } + dial := func(t testing.TB, srv *endpointInfoServer, assertErr func(error)) (*Client, *bool) { + return _dial(t, srv, assertErr, nil) + } + t.Run("OK", func(t *testing.T) { + srv := srv + assertErr := func(err error) { require.NoError(t, err) } + c, handlerCalled := dial(t, &srv, assertErr) + res, err := c.GetEndpointInfo(ctx, GetEndpointInfoOptions{}) + assertErr(err) + require.Equal(t, srv.latestVersion, res.LatestVersion) + require.Equal(t, srv.nodeInfo, res.Node) + require.True(t, *handlerCalled) + }) + t.Run("fail", func(t *testing.T) { + t.Run("sign request", func(t *testing.T) { + srv := srv + srv.sleepDur = 0 + assertErr := func(err error) { require.ErrorContains(t, err, errSignRequest) } + c, handlerCalled := dial(t, &srv, assertErr) + c.signer = neofscryptotest.FailSigner(c.signer) + _, err := c.GetEndpointInfo(ctx, GetEndpointInfoOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("transport", func(t *testing.T) { + srv := srv + srv.errTransport = errors.New("any transport failure") + assertErr := func(err error) { + require.ErrorContains(t, err, errTransport) + require.ErrorContains(t, err, "any transport failure") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetEndpointInfo(ctx, GetEndpointInfoOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("invalid response signature", func(t *testing.T) { + for i, testCase := range []struct { + err string + corrupt 
func(*apinetmap.LocalNodeInfoResponse) + }{ + {err: "missing verification header", + corrupt: func(r *apinetmap.LocalNodeInfoResponse) { r.VerifyHeader = nil }, + }, + {err: "missing body signature", + corrupt: func(r *apinetmap.LocalNodeInfoResponse) { r.VerifyHeader.BodySignature = nil }, + }, + {err: "missing signature of the meta header", + corrupt: func(r *apinetmap.LocalNodeInfoResponse) { r.VerifyHeader.MetaSignature = nil }, + }, + {err: "missing signature of the origin verification header", + corrupt: func(r *apinetmap.LocalNodeInfoResponse) { r.VerifyHeader.OriginSignature = nil }, + }, + {err: "verify body signature: missing public key", + corrupt: func(r *apinetmap.LocalNodeInfoResponse) { r.VerifyHeader.BodySignature.Key = nil }, + }, + {err: "verify signature of the meta header: missing public key", + corrupt: func(r *apinetmap.LocalNodeInfoResponse) { r.VerifyHeader.MetaSignature.Key = nil }, + }, + {err: "verify signature of the origin verification header: missing public key", + corrupt: func(r *apinetmap.LocalNodeInfoResponse) { r.VerifyHeader.OriginSignature.Key = nil }, + }, + {err: "verify body signature: decode public key from binary", + corrupt: func(r *apinetmap.LocalNodeInfoResponse) { + r.VerifyHeader.BodySignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the meta header: decode public key from binary", + corrupt: func(r *apinetmap.LocalNodeInfoResponse) { + r.VerifyHeader.MetaSignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the origin verification header: decode public key from binary", + corrupt: func(r *apinetmap.LocalNodeInfoResponse) { + r.VerifyHeader.OriginSignature.Key = []byte("not a public key") + }, + }, + {err: "verify body signature: invalid scheme -1", + corrupt: func(r *apinetmap.LocalNodeInfoResponse) { r.VerifyHeader.BodySignature.Scheme = -1 }, + }, + {err: "verify body signature: unsupported scheme 3", + corrupt: func(r *apinetmap.LocalNodeInfoResponse) { 
r.VerifyHeader.BodySignature.Scheme = 3 }, + }, + {err: "verify signature of the meta header: unsupported scheme 3", + corrupt: func(r *apinetmap.LocalNodeInfoResponse) { r.VerifyHeader.MetaSignature.Scheme = 3 }, + }, + {err: "verify signature of the origin verification header: unsupported scheme 3", + corrupt: func(r *apinetmap.LocalNodeInfoResponse) { r.VerifyHeader.OriginSignature.Scheme = 3 }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apinetmap.LocalNodeInfoResponse) { r.VerifyHeader.BodySignature.Sign[0]++ }, + }, + {err: "verify signature of the meta header: signature mismatch", + corrupt: func(r *apinetmap.LocalNodeInfoResponse) { r.VerifyHeader.MetaSignature.Sign[0]++ }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apinetmap.LocalNodeInfoResponse) { r.VerifyHeader.OriginSignature.Sign[0]++ }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apinetmap.LocalNodeInfoResponse) { + r.VerifyHeader.BodySignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the meta header: signature mismatch", + corrupt: func(r *apinetmap.LocalNodeInfoResponse) { + r.VerifyHeader.MetaSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apinetmap.LocalNodeInfoResponse) { + r.VerifyHeader.OriginSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + } { + srv := srv + srv.corruptRespSig = testCase.corrupt + assertErr := func(err error) { + require.ErrorContains(t, err, errResponseSignature, [2]any{i, testCase}) + require.ErrorContains(t, err, testCase.err, [2]any{i, testCase}) + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetEndpointInfo(ctx, GetEndpointInfoOptions{}) + assertErr(err) + 
require.True(t, *handlerCalled) + } + }) + t.Run("invalid response status", func(t *testing.T) { + srv := srv + srv.modifyResp = func(r *apinetmap.LocalNodeInfoResponse) { + r.MetaHeader.Status = &status.Status{Code: status.InternalServerError, Details: make([]*status.Status_Detail, 1)} + } + assertErr := func(err error) { + require.ErrorContains(t, err, errInvalidResponseStatus) + require.ErrorContains(t, err, "details attached but not supported") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetEndpointInfo(ctx, GetEndpointInfoOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("status errors", func(t *testing.T) { + for _, testCase := range []struct { + code uint32 + errConst error + errVar any + }{ + {code: 1 << 32 / 2}, + {code: status.InternalServerError, errConst: apistatus.ErrServerInternal, errVar: new(apistatus.InternalServerError)}, + {code: status.SignatureVerificationFail, errConst: apistatus.ErrSignatureVerification, errVar: new(apistatus.SignatureVerificationFailure)}, + } { + srv := srv + srv.modifyResp = func(r *apinetmap.LocalNodeInfoResponse) { + r.MetaHeader.Status = &status.Status{Code: testCase.code, Message: "any message"} + } + assertErr := func(err error) { + require.ErrorIs(t, err, apistatus.Error, testCase) + require.ErrorContains(t, err, "any message", testCase) + if testCase.errConst != nil { + require.ErrorIs(t, err, testCase.errConst, testCase) + } + if testCase.errVar != nil { + require.ErrorAs(t, err, testCase.errVar, testCase) + } + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetEndpointInfo(ctx, GetEndpointInfoOptions{}) + assertErr(err) + require.True(t, *handlerCalled, testCase) + } + }) + t.Run("response body", func(t *testing.T) { + t.Run("missing", func(t *testing.T) { + srv := srv + assertErr := func(err error) { require.EqualError(t, err, "invalid response: missing body") } + c, handlerCalled := dial(t, &srv, assertErr) + srv.modifyResp = func(r 
*apinetmap.LocalNodeInfoResponse) { r.Body = nil } + _, err := c.GetEndpointInfo(ctx, GetEndpointInfoOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("missing version", func(t *testing.T) { + srv := srv + srv.modifyResp = func(r *apinetmap.LocalNodeInfoResponse) { r.Body.Version = nil } + assertErr := func(err error) { + require.EqualError(t, err, "invalid response: invalid body: missing required field (version)") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetEndpointInfo(ctx, GetEndpointInfoOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("missing node info", func(t *testing.T) { + srv := srv + srv.modifyResp = func(r *apinetmap.LocalNodeInfoResponse) { r.Body.NodeInfo = nil } + assertErr := func(err error) { + require.EqualError(t, err, "invalid response: invalid body: missing required field (node info)") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetEndpointInfo(ctx, GetEndpointInfoOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("invalid node info", func(t *testing.T) { + for _, testCase := range []struct { + name string + err string + contains bool + corrupt func(info *apinetmap.NodeInfo) + }{ + {name: "nil public key", err: "missing public key", + corrupt: func(n *apinetmap.NodeInfo) { n.PublicKey = nil }}, + {name: "empty public key", err: "missing public key", + corrupt: func(n *apinetmap.NodeInfo) { n.PublicKey = nil }}, + {name: "nil addresses", err: "missing network endpoints", + corrupt: func(n *apinetmap.NodeInfo) { n.Addresses = nil }}, + {name: "empty addresses", err: "missing network endpoints", + corrupt: func(n *apinetmap.NodeInfo) { n.Addresses = []string{} }}, + {name: "empty address", err: "empty network endpoint #1", + corrupt: func(n *apinetmap.NodeInfo) { n.Addresses = []string{"any", "", "any"} }}, + {name: "attributes/missing key", err: "invalid attribute #1: missing key", + corrupt: func(n *apinetmap.NodeInfo) { + 
n.Attributes = []*apinetmap.NodeInfo_Attribute{ + {Key: "key_valid", Value: "any"}, + {Key: "", Value: "any"}, + } + }}, + {name: "attributes/repeated keys", err: "multiple attributes with key=k2", + corrupt: func(n *apinetmap.NodeInfo) { + n.Attributes = []*apinetmap.NodeInfo_Attribute{ + {Key: "k1", Value: "any"}, + {Key: "k2", Value: "1"}, + {Key: "k3", Value: "any"}, + {Key: "k2", Value: "2"}, + } + }}, + {name: "attributes/missing value", err: "invalid attribute #1 (key2): missing value", + corrupt: func(n *apinetmap.NodeInfo) { + n.Attributes = []*apinetmap.NodeInfo_Attribute{ + {Key: "key1", Value: "any"}, + {Key: "key2", Value: ""}, + } + }}, + {name: "attributes/price", err: "invalid price attribute (#1): invalid integer", contains: true, + corrupt: func(n *apinetmap.NodeInfo) { + n.Attributes = []*apinetmap.NodeInfo_Attribute{ + {Key: "any", Value: "any"}, + {Key: "Price", Value: "not_a_number"}, + } + }}, + {name: "attributes/capacity", err: "invalid capacity attribute (#1): invalid integer", contains: true, + corrupt: func(n *apinetmap.NodeInfo) { + n.Attributes = []*apinetmap.NodeInfo_Attribute{ + {Key: "any", Value: "any"}, + {Key: "Capacity", Value: "not_a_number"}, + } + }}, + } { + srv := srv + srv.modifyResp = func(r *apinetmap.LocalNodeInfoResponse) { testCase.corrupt(r.Body.NodeInfo) } + assertErr := func(err error) { + if testCase.contains { + require.ErrorContains(t, err, fmt.Sprintf("invalid response: invalid body: invalid field (node info): %s", testCase.err)) + } else { + require.EqualError(t, err, fmt.Sprintf("invalid response: invalid body: invalid field (node info): %s", testCase.err)) + } + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetEndpointInfo(ctx, GetEndpointInfoOptions{}) + assertErr(err) + require.True(t, *handlerCalled, testCase) + } + }) + }) + }) + t.Run("response info handler", func(t *testing.T) { + t.Run("OK", func(t *testing.T) { + srv := srv + srv.epoch = 3598503 + assertErr := func(err error) { 
require.NoError(t, err) } + respHandlerCalled := false + c, reqHandlerCalled := _dial(t, &srv, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + respHandlerCalled = true + require.EqualValues(t, 3598503, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return nil + }) + }) + _, err := c.GetEndpointInfo(ctx, GetEndpointInfoOptions{}) + assertErr(err) + require.True(t, respHandlerCalled) + require.True(t, *reqHandlerCalled) + }) + t.Run("fail", func(t *testing.T) { + srv := srv + srv.epoch = 4386380643 + assertErr := func(err error) { require.ErrorContains(t, err, "intercept response info: some handler error") } + respHandlerCalled := false + c, reqHandlerCalled := _dial(t, &srv, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + if !respHandlerCalled { // dial + respHandlerCalled = true + return nil + } + require.EqualValues(t, 4386380643, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return errors.New("some handler error") + }) + }) + _, err := c.GetEndpointInfo(ctx, GetEndpointInfoOptions{}) + assertErr(err) + require.True(t, respHandlerCalled) + require.True(t, *reqHandlerCalled) + }) + }) +} - // failure error - _, err = c.NetMapSnapshot(ctx, prm) - require.Error(t, err) - require.ErrorIs(t, err, apistatus.ErrServerInternal) +type networkInfoServer struct { + // client + clientSigScheme neofscrypto.Scheme + clientPubKey []byte + // server + called bool // to distinguish on-dial call + sleepDur time.Duration + endpointInfoOnDialServer + netInfo netmap.NetworkInfo + errTransport error + modifyResp func(*apinetmap.NetworkInfoResponse) + corruptRespSig func(*apinetmap.NetworkInfoResponse) +} - srv.statusOK = true +func (x *networkInfoServer) NetworkInfo(ctx context.Context, req *apinetmap.NetworkInfoRequest) 
(*apinetmap.NetworkInfoResponse, error) { + if x.sleepDur > 0 { + time.Sleep(x.sleepDur) + } + if x.errTransport != nil { + return nil, x.errTransport + } + var sts status.Status + resp := apinetmap.NetworkInfoResponse{ + MetaHeader: &apisession.ResponseMetaHeader{Status: &sts, Epoch: x.epoch}, + } + var err error + if ctx == nil { + sts.Code, sts.Message = status.InternalServerError, "nil context" + } else if req == nil { + sts.Code, sts.Message = status.InternalServerError, "nil request" + } else if err = neofscrypto.VerifyRequest(req, req.Body); err != nil { + sts.Code, sts.Message = status.SignatureVerificationFail, err.Error() + } else if req.VerifyHeader.BodySignature.Scheme != refs.SignatureScheme(x.clientSigScheme) || + !bytes.Equal(req.VerifyHeader.BodySignature.Key, x.clientPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request body signature credentials" + } else if req.VerifyHeader.MetaSignature.Scheme != refs.SignatureScheme(x.clientSigScheme) || + !bytes.Equal(req.VerifyHeader.MetaSignature.Key, x.clientPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request meta header signature credentials" + } else if req.VerifyHeader.OriginSignature.Scheme != refs.SignatureScheme(x.clientSigScheme) || + !bytes.Equal(req.VerifyHeader.OriginSignature.Key, x.clientPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected origin request verification header signature credentials" + } else if req.MetaHeader != nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: meta header is set" + } else { + resp.MetaHeader.Status = nil + resp.Body = &apinetmap.NetworkInfoResponse_Body{ + NetworkInfo: new(apinetmap.NetworkInfo), + } + x.netInfo.WriteToV2(resp.Body.NetworkInfo) + } + if x.modifyResp != nil { + x.modifyResp(&resp) + } + resp.VerifyHeader, err = neofscrypto.SignResponse(x.serverSigner, &resp, resp.Body, nil) + if err != nil { + return nil, 
fmt.Errorf("sign response: %w", err) + } + if x.corruptRespSig != nil { + x.corruptRespSig(&resp) + } + return &resp, nil +} - // missing netmap field - _, err = c.NetMapSnapshot(ctx, prm) - require.Error(t, err) +func TestClient_GetNetworkInfo(t *testing.T) { + ctx := context.Background() + var srv networkInfoServer + srv.sleepDur = 10 * time.Millisecond + srv.serverSigner = neofscryptotest.RandomSigner() + srv.latestVersion = versiontest.Version() + srv.nodeInfo = netmaptest.NodeInfo() + srv.nodeInfo.SetPublicKey(neofscrypto.PublicKeyBytes(srv.serverSigner.Public())) + srv.netInfo = netmaptest.NetworkInfo() + _dial := func(t testing.TB, srv *networkInfoServer, assertErr func(error), customizeOpts func(*Options)) (*Client, *bool) { + var opts Options + var handlerCalled bool + opts.SetAPIRequestResultHandler(func(nodeKey []byte, endpoint string, op stat.Method, dur time.Duration, err error) { + handlerCalled = true + require.Equal(t, srv.nodeInfo.PublicKey(), nodeKey) + require.Equal(t, "localhost:8080", endpoint) + require.Equal(t, stat.MethodNetworkInfo, op) + require.Greater(t, dur, srv.sleepDur) + assertErr(err) + }) + if customizeOpts != nil { + customizeOpts(&opts) + } - srv.setNetMap = true + c, err := New(anyValidURI, opts) + require.NoError(t, err) + srv.clientSigScheme = c.signer.Scheme() + srv.clientPubKey = neofscrypto.PublicKeyBytes(c.signer.Public()) - // invalid network map - var netMap netmap.NetMap + conn := bufconn.Listen(10 << 10) + gs := grpc.NewServer() + apinetmap.RegisterNetmapServiceServer(gs, srv) + go func() { _ = gs.Serve(conn) }() + t.Cleanup(gs.Stop) - var node netmap.NodeInfo - // TODO: #260 use instance corrupter + c.dial = func(ctx context.Context, _ string) (net.Conn, error) { return conn.DialContext(ctx) } + require.NoError(t, c.Dial(ctx)) - var nodeV2 v2netmap.NodeInfo + return c, &handlerCalled + } + dial := func(t testing.TB, srv *networkInfoServer, assertErr func(error)) (*Client, *bool) { + return _dial(t, srv, assertErr, 
nil) + } + t.Run("OK", func(t *testing.T) { + srv := srv + assertErr := func(err error) { require.NoError(t, err) } + c, handlerCalled := dial(t, &srv, assertErr) + res, err := c.GetNetworkInfo(ctx, GetNetworkInfoOptions{}) + assertErr(err) + if !assert.ObjectsAreEqual(srv.netInfo, res) { + // can be caused by gRPC service fields, binaries must still be equal + require.Equal(t, srv.netInfo.Marshal(), res.Marshal()) + } + require.True(t, *handlerCalled) + }) + t.Run("fail", func(t *testing.T) { + t.Run("sign request", func(t *testing.T) { + srv := srv + srv.sleepDur = 0 + assertErr := func(err error) { require.ErrorContains(t, err, errSignRequest) } + c, handlerCalled := dial(t, &srv, assertErr) + c.signer = neofscryptotest.FailSigner(c.signer) + _, err := c.GetNetworkInfo(ctx, GetNetworkInfoOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("transport", func(t *testing.T) { + srv := srv + srv.errTransport = errors.New("any transport failure") + assertErr := func(err error) { + require.ErrorContains(t, err, errTransport) + require.ErrorContains(t, err, "any transport failure") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetNetworkInfo(ctx, GetNetworkInfoOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("invalid response signature", func(t *testing.T) { + for i, testCase := range []struct { + err string + corrupt func(*apinetmap.NetworkInfoResponse) + }{ + {err: "missing verification header", + corrupt: func(r *apinetmap.NetworkInfoResponse) { r.VerifyHeader = nil }, + }, + {err: "missing body signature", + corrupt: func(r *apinetmap.NetworkInfoResponse) { r.VerifyHeader.BodySignature = nil }, + }, + {err: "missing signature of the meta header", + corrupt: func(r *apinetmap.NetworkInfoResponse) { r.VerifyHeader.MetaSignature = nil }, + }, + {err: "missing signature of the origin verification header", + corrupt: func(r *apinetmap.NetworkInfoResponse) { r.VerifyHeader.OriginSignature = nil }, + }, + 
{err: "verify body signature: missing public key", + corrupt: func(r *apinetmap.NetworkInfoResponse) { r.VerifyHeader.BodySignature.Key = nil }, + }, + {err: "verify signature of the meta header: missing public key", + corrupt: func(r *apinetmap.NetworkInfoResponse) { r.VerifyHeader.MetaSignature.Key = nil }, + }, + {err: "verify signature of the origin verification header: missing public key", + corrupt: func(r *apinetmap.NetworkInfoResponse) { r.VerifyHeader.OriginSignature.Key = nil }, + }, + {err: "verify body signature: decode public key from binary", + corrupt: func(r *apinetmap.NetworkInfoResponse) { + r.VerifyHeader.BodySignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the meta header: decode public key from binary", + corrupt: func(r *apinetmap.NetworkInfoResponse) { + r.VerifyHeader.MetaSignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the origin verification header: decode public key from binary", + corrupt: func(r *apinetmap.NetworkInfoResponse) { + r.VerifyHeader.OriginSignature.Key = []byte("not a public key") + }, + }, + {err: "verify body signature: invalid scheme -1", + corrupt: func(r *apinetmap.NetworkInfoResponse) { r.VerifyHeader.BodySignature.Scheme = -1 }, + }, + {err: "verify body signature: unsupported scheme 3", + corrupt: func(r *apinetmap.NetworkInfoResponse) { r.VerifyHeader.BodySignature.Scheme = 3 }, + }, + {err: "verify signature of the meta header: unsupported scheme 3", + corrupt: func(r *apinetmap.NetworkInfoResponse) { r.VerifyHeader.MetaSignature.Scheme = 3 }, + }, + {err: "verify signature of the origin verification header: unsupported scheme 3", + corrupt: func(r *apinetmap.NetworkInfoResponse) { r.VerifyHeader.OriginSignature.Scheme = 3 }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apinetmap.NetworkInfoResponse) { r.VerifyHeader.BodySignature.Sign[0]++ }, + }, + {err: "verify signature of the meta header: signature mismatch", + 
corrupt: func(r *apinetmap.NetworkInfoResponse) { r.VerifyHeader.MetaSignature.Sign[0]++ }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apinetmap.NetworkInfoResponse) { r.VerifyHeader.OriginSignature.Sign[0]++ }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apinetmap.NetworkInfoResponse) { + r.VerifyHeader.BodySignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the meta header: signature mismatch", + corrupt: func(r *apinetmap.NetworkInfoResponse) { + r.VerifyHeader.MetaSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apinetmap.NetworkInfoResponse) { + r.VerifyHeader.OriginSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + } { + srv := srv + srv.corruptRespSig = testCase.corrupt + assertErr := func(err error) { + require.ErrorContains(t, err, errResponseSignature, [2]any{i, testCase}) + require.ErrorContains(t, err, testCase.err, [2]any{i, testCase}) + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetNetworkInfo(ctx, GetNetworkInfoOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + } + }) + t.Run("invalid response status", func(t *testing.T) { + srv := srv + srv.modifyResp = func(r *apinetmap.NetworkInfoResponse) { + r.MetaHeader.Status = &status.Status{Code: status.InternalServerError, Details: make([]*status.Status_Detail, 1)} + } + assertErr := func(err error) { + require.ErrorContains(t, err, errInvalidResponseStatus) + require.ErrorContains(t, err, "details attached but not supported") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetNetworkInfo(ctx, GetNetworkInfoOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("status errors", func(t 
*testing.T) { + for _, testCase := range []struct { + code uint32 + errConst error + errVar any + }{ + {code: 1 << 32 / 2}, + {code: status.InternalServerError, errConst: apistatus.ErrServerInternal, errVar: new(apistatus.InternalServerError)}, + {code: status.SignatureVerificationFail, errConst: apistatus.ErrSignatureVerification, errVar: new(apistatus.SignatureVerificationFailure)}, + } { + srv := srv + srv.modifyResp = func(r *apinetmap.NetworkInfoResponse) { + r.MetaHeader.Status = &status.Status{Code: testCase.code, Message: "any message"} + } + assertErr := func(err error) { + require.ErrorIs(t, err, apistatus.Error, testCase) + require.ErrorContains(t, err, "any message", testCase) + if testCase.errConst != nil { + require.ErrorIs(t, err, testCase.errConst, testCase) + } + if testCase.errVar != nil { + require.ErrorAs(t, err, testCase.errVar, testCase) + } + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetNetworkInfo(ctx, GetNetworkInfoOptions{}) + assertErr(err) + require.True(t, *handlerCalled, testCase) + } + }) + t.Run("response body", func(t *testing.T) { + t.Run("missing", func(t *testing.T) { + srv := srv + assertErr := func(err error) { require.EqualError(t, err, "invalid response: missing body") } + c, handlerCalled := dial(t, &srv, assertErr) + srv.modifyResp = func(r *apinetmap.NetworkInfoResponse) { r.Body = nil } + _, err := c.GetNetworkInfo(ctx, GetNetworkInfoOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("missing network info", func(t *testing.T) { + srv := srv + srv.modifyResp = func(r *apinetmap.NetworkInfoResponse) { r.Body.NetworkInfo = nil } + assertErr := func(err error) { + require.EqualError(t, err, "invalid response: invalid body: missing required field (network info)") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetNetworkInfo(ctx, GetNetworkInfoOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("invalid network info", func(t *testing.T) { + 
testCases := []struct { + name string + err string + contains bool + prm apinetmap.NetworkConfig_Parameter + }{ + {name: "nil key", err: "invalid network parameter #1: missing name", prm: apinetmap.NetworkConfig_Parameter{ + Key: nil, Value: []byte("any"), + }}, + {name: "empty key", err: "invalid network parameter #1: missing name", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte{}, Value: []byte("any"), + }}, + {name: "nil value", err: "invalid network parameter #1: missing value", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("any"), Value: nil, + }}, + {name: "repeated keys", err: "multiple network parameters with name=any_key", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("any_key"), Value: []byte("any"), + }}, + {name: "audit fee format", err: "invalid network parameter #1 (AuditFee): invalid numeric parameter length 13", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("AuditFee"), Value: []byte("Hello, world!"), + }}, + {name: "storage price format", err: "invalid network parameter #1 (BasicIncomeRate): invalid numeric parameter length 13", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("BasicIncomeRate"), Value: []byte("Hello, world!"), + }}, + {name: "container fee format", err: "invalid network parameter #1 (ContainerFee): invalid numeric parameter length 13", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("ContainerFee"), Value: []byte("Hello, world!"), + }}, + {name: "named container fee format", err: "invalid network parameter #1 (ContainerAliasFee): invalid numeric parameter length 13", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("ContainerAliasFee"), Value: []byte("Hello, world!"), + }}, + {name: "num of EigenTrust iterations format", err: "invalid network parameter #1 (EigenTrustIterations): invalid numeric parameter length 13", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("EigenTrustIterations"), Value: []byte("Hello, world!"), + }}, + {name: "epoch duration format", err: "invalid 
network parameter #1 (EpochDuration): invalid numeric parameter length 13", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("EpochDuration"), Value: []byte("Hello, world!"), + }}, + {name: "IR candidate fee format", err: "invalid network parameter #1 (InnerRingCandidateFee): invalid numeric parameter length 13", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("InnerRingCandidateFee"), Value: []byte("Hello, world!"), + }}, + {name: "max object size format", err: "invalid network parameter #1 (MaxObjectSize): invalid numeric parameter length 13", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("MaxObjectSize"), Value: []byte("Hello, world!"), + }}, + {name: "withdrawal fee format", err: "invalid network parameter #1 (WithdrawFee): invalid numeric parameter length 13", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("WithdrawFee"), Value: []byte("Hello, world!"), + }}, + {name: "EigenTrust alpha format", err: "invalid network parameter #1 (EigenTrustAlpha): invalid numeric parameter length 13", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("EigenTrustAlpha"), Value: []byte("Hello, world!"), + }}, + {name: "negative EigenTrust alpha", err: "invalid network parameter #1 (EigenTrustAlpha): EigenTrust alpha value -3.14 is out of range [0, 1]", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("EigenTrustAlpha"), Value: []byte{31, 133, 235, 81, 184, 30, 9, 192}, + }}, + {name: "negative EigenTrust alpha", err: "invalid network parameter #1 (EigenTrustAlpha): EigenTrust alpha value 1.10 is out of range [0, 1]", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("EigenTrustAlpha"), Value: []byte{154, 153, 153, 153, 153, 153, 241, 63}, + }}, + {name: "disable homomorphic hashing format", err: "invalid network parameter #1 (HomomorphicHashingDisabled): invalid bool parameter", contains: true, prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("HomomorphicHashingDisabled"), Value: make([]byte, 32+1), // max 32 + }}, + {name: "allow 
maintenance mode format", err: "invalid network parameter #1 (MaintenanceModeAllowed): invalid bool parameter", contains: true, prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("MaintenanceModeAllowed"), Value: make([]byte, 32+1), // max 32 + }}, + } + for i := range testCases { + srv := srv + srv.modifyResp = func(r *apinetmap.NetworkInfoResponse) { + r.Body.NetworkInfo.NetworkConfig.Parameters = []*apinetmap.NetworkConfig_Parameter{ + {Key: []byte("any_key"), Value: []byte("any_val")}, + &testCases[i].prm, + } + } + assertErr := func(err error) { + if testCases[i].contains { + require.ErrorContains(t, err, fmt.Sprintf("invalid response: invalid body: invalid field (network info): %s", testCases[i].err)) + } else { + require.EqualError(t, err, fmt.Sprintf("invalid response: invalid body: invalid field (network info): %s", testCases[i].err)) + } + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetNetworkInfo(ctx, GetNetworkInfoOptions{}) + assertErr(err) + require.True(t, *handlerCalled, testCases[i].name) + } + }) + }) + }) + t.Run("response info handler", func(t *testing.T) { + t.Run("OK", func(t *testing.T) { + srv := srv + srv.epoch = 3598503 + assertErr := func(err error) { require.NoError(t, err) } + respHandlerCalled := false + c, reqHandlerCalled := _dial(t, &srv, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + respHandlerCalled = true + require.EqualValues(t, 3598503, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return nil + }) + }) + _, err := c.GetNetworkInfo(ctx, GetNetworkInfoOptions{}) + assertErr(err) + require.True(t, respHandlerCalled) + require.True(t, *reqHandlerCalled) + }) + t.Run("fail", func(t *testing.T) { + srv := srv + srv.epoch = 4386380643 + assertErr := func(err error) { require.ErrorContains(t, err, "intercept response info: some handler error") } + respHandlerCalled := false + c, 
reqHandlerCalled := _dial(t, &srv, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + if !respHandlerCalled { // dial + respHandlerCalled = true + return nil + } + require.EqualValues(t, 4386380643, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return errors.New("some handler error") + }) + }) + _, err := c.GetNetworkInfo(ctx, GetNetworkInfoOptions{}) + assertErr(err) + require.True(t, respHandlerCalled) + require.True(t, *reqHandlerCalled) + }) + }) +} - node.WriteToV2(&nodeV2) - require.Error(t, new(netmap.NodeInfo).ReadFromV2(nodeV2)) +type currentNetmapServer struct { + // client + clientSigScheme neofscrypto.Scheme + clientPubKey []byte + // server + called bool // to distinguish on-dial call + sleepDur time.Duration + endpointInfoOnDialServer + curNetmap netmap.NetMap + errTransport error + modifyResp func(*apinetmap.NetmapSnapshotResponse) + corruptRespSig func(*apinetmap.NetmapSnapshotResponse) +} - netMap.SetNodes([]netmap.NodeInfo{node}) - netMap.WriteToV2(&srv.netMap) +func (x *currentNetmapServer) NetmapSnapshot(ctx context.Context, req *apinetmap.NetmapSnapshotRequest) (*apinetmap.NetmapSnapshotResponse, error) { + if x.sleepDur > 0 { + time.Sleep(x.sleepDur) + } + if x.errTransport != nil { + return nil, x.errTransport + } + var sts status.Status + resp := apinetmap.NetmapSnapshotResponse{ + MetaHeader: &apisession.ResponseMetaHeader{Status: &sts, Epoch: x.epoch}, + } + var err error + if ctx == nil { + sts.Code, sts.Message = status.InternalServerError, "nil context" + } else if req == nil { + sts.Code, sts.Message = status.InternalServerError, "nil request" + } else if err = neofscrypto.VerifyRequest(req, req.Body); err != nil { + sts.Code, sts.Message = status.SignatureVerificationFail, err.Error() + } else if req.VerifyHeader.BodySignature.Scheme != refs.SignatureScheme(x.clientSigScheme) || + 
!bytes.Equal(req.VerifyHeader.BodySignature.Key, x.clientPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request body signature credentials" + } else if req.VerifyHeader.MetaSignature.Scheme != refs.SignatureScheme(x.clientSigScheme) || + !bytes.Equal(req.VerifyHeader.MetaSignature.Key, x.clientPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request meta header signature credentials" + } else if req.VerifyHeader.OriginSignature.Scheme != refs.SignatureScheme(x.clientSigScheme) || + !bytes.Equal(req.VerifyHeader.OriginSignature.Key, x.clientPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected origin request verification header signature credentials" + } else if req.MetaHeader != nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: meta header is set" + } else { + resp.MetaHeader.Status = nil + resp.Body = &apinetmap.NetmapSnapshotResponse_Body{ + Netmap: new(apinetmap.Netmap), + } + x.curNetmap.WriteToV2(resp.Body.Netmap) + } + if x.modifyResp != nil { + x.modifyResp(&resp) + } + resp.VerifyHeader, err = neofscrypto.SignResponse(x.serverSigner, &resp, resp.Body, nil) + if err != nil { + return nil, fmt.Errorf("sign response: %w", err) + } + if x.corruptRespSig != nil { + x.corruptRespSig(&resp) + } + return &resp, nil +} - _, err = c.NetMapSnapshot(ctx, prm) - require.Error(t, err) +func TestClient_GetCurrentNetmap(t *testing.T) { + ctx := context.Background() + var srv currentNetmapServer + srv.sleepDur = 10 * time.Millisecond + srv.serverSigner = neofscryptotest.RandomSigner() + srv.latestVersion = versiontest.Version() + srv.nodeInfo = netmaptest.NodeInfo() + srv.nodeInfo.SetPublicKey(neofscrypto.PublicKeyBytes(srv.serverSigner.Public())) + srv.curNetmap = netmaptest.Netmap() + _dial := func(t testing.TB, srv *currentNetmapServer, assertErr func(error), customizeOpts func(*Options)) (*Client, *bool) { + var opts Options + var handlerCalled 
bool + opts.SetAPIRequestResultHandler(func(nodeKey []byte, endpoint string, op stat.Method, dur time.Duration, err error) { + handlerCalled = true + require.Equal(t, srv.nodeInfo.PublicKey(), nodeKey) + require.Equal(t, "localhost:8080", endpoint) + require.Equal(t, stat.MethodNetMapSnapshot, op) + require.Greater(t, dur, srv.sleepDur) + assertErr(err) + }) + if customizeOpts != nil { + customizeOpts(&opts) + } - // correct network map - // TODO: #260 use instance normalizer - node.SetPublicKey([]byte{1, 2, 3}) - node.SetNetworkEndpoints("1", "2", "3") + c, err := New(anyValidURI, opts) + require.NoError(t, err) + srv.clientSigScheme = c.signer.Scheme() + srv.clientPubKey = neofscrypto.PublicKeyBytes(c.signer.Public()) - node.WriteToV2(&nodeV2) - require.NoError(t, new(netmap.NodeInfo).ReadFromV2(nodeV2)) + conn := bufconn.Listen(10 << 10) + gs := grpc.NewServer() + apinetmap.RegisterNetmapServiceServer(gs, srv) + go func() { _ = gs.Serve(conn) }() + t.Cleanup(gs.Stop) - netMap.SetNodes([]netmap.NodeInfo{node}) - netMap.WriteToV2(&srv.netMap) + c.dial = func(ctx context.Context, _ string) (net.Conn, error) { return conn.DialContext(ctx) } + require.NoError(t, c.Dial(ctx)) - res, err = c.NetMapSnapshot(ctx, prm) - require.NoError(t, err) - require.Equal(t, netMap, res) + return c, &handlerCalled + } + dial := func(t testing.TB, srv *currentNetmapServer, assertErr func(error)) (*Client, *bool) { + return _dial(t, srv, assertErr, nil) + } + t.Run("OK", func(t *testing.T) { + srv := srv + assertErr := func(err error) { require.NoError(t, err) } + c, handlerCalled := dial(t, &srv, assertErr) + res, err := c.GetCurrentNetmap(ctx, GetCurrentNetmapOptions{}) + assertErr(err) + require.EqualValues(t, srv.curNetmap, res) + require.True(t, *handlerCalled) + }) + t.Run("fail", func(t *testing.T) { + t.Run("sign request", func(t *testing.T) { + srv := srv + srv.sleepDur = 0 + assertErr := func(err error) { require.ErrorContains(t, err, errSignRequest) } + c, handlerCalled := 
dial(t, &srv, assertErr) + c.signer = neofscryptotest.FailSigner(c.signer) + _, err := c.GetCurrentNetmap(ctx, GetCurrentNetmapOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("transport", func(t *testing.T) { + srv := srv + srv.errTransport = errors.New("any transport failure") + assertErr := func(err error) { + require.ErrorContains(t, err, errTransport) + require.ErrorContains(t, err, "any transport failure") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetCurrentNetmap(ctx, GetCurrentNetmapOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("invalid response signature", func(t *testing.T) { + for i, testCase := range []struct { + err string + corrupt func(request *apinetmap.NetmapSnapshotResponse) + }{ + {err: "missing verification header", + corrupt: func(r *apinetmap.NetmapSnapshotResponse) { r.VerifyHeader = nil }, + }, + {err: "missing body signature", + corrupt: func(r *apinetmap.NetmapSnapshotResponse) { r.VerifyHeader.BodySignature = nil }, + }, + {err: "missing signature of the meta header", + corrupt: func(r *apinetmap.NetmapSnapshotResponse) { r.VerifyHeader.MetaSignature = nil }, + }, + {err: "missing signature of the origin verification header", + corrupt: func(r *apinetmap.NetmapSnapshotResponse) { r.VerifyHeader.OriginSignature = nil }, + }, + {err: "verify body signature: missing public key", + corrupt: func(r *apinetmap.NetmapSnapshotResponse) { r.VerifyHeader.BodySignature.Key = nil }, + }, + {err: "verify signature of the meta header: missing public key", + corrupt: func(r *apinetmap.NetmapSnapshotResponse) { r.VerifyHeader.MetaSignature.Key = nil }, + }, + {err: "verify signature of the origin verification header: missing public key", + corrupt: func(r *apinetmap.NetmapSnapshotResponse) { r.VerifyHeader.OriginSignature.Key = nil }, + }, + {err: "verify body signature: decode public key from binary", + corrupt: func(r *apinetmap.NetmapSnapshotResponse) { + 
r.VerifyHeader.BodySignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the meta header: decode public key from binary", + corrupt: func(r *apinetmap.NetmapSnapshotResponse) { + r.VerifyHeader.MetaSignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the origin verification header: decode public key from binary", + corrupt: func(r *apinetmap.NetmapSnapshotResponse) { + r.VerifyHeader.OriginSignature.Key = []byte("not a public key") + }, + }, + {err: "verify body signature: invalid scheme -1", + corrupt: func(r *apinetmap.NetmapSnapshotResponse) { r.VerifyHeader.BodySignature.Scheme = -1 }, + }, + {err: "verify body signature: unsupported scheme 3", + corrupt: func(r *apinetmap.NetmapSnapshotResponse) { r.VerifyHeader.BodySignature.Scheme = 3 }, + }, + {err: "verify signature of the meta header: unsupported scheme 3", + corrupt: func(r *apinetmap.NetmapSnapshotResponse) { r.VerifyHeader.MetaSignature.Scheme = 3 }, + }, + {err: "verify signature of the origin verification header: unsupported scheme 3", + corrupt: func(r *apinetmap.NetmapSnapshotResponse) { r.VerifyHeader.OriginSignature.Scheme = 3 }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apinetmap.NetmapSnapshotResponse) { r.VerifyHeader.BodySignature.Sign[0]++ }, + }, + {err: "verify signature of the meta header: signature mismatch", + corrupt: func(r *apinetmap.NetmapSnapshotResponse) { r.VerifyHeader.MetaSignature.Sign[0]++ }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apinetmap.NetmapSnapshotResponse) { r.VerifyHeader.OriginSignature.Sign[0]++ }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apinetmap.NetmapSnapshotResponse) { + r.VerifyHeader.BodySignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the meta header: signature mismatch", + corrupt: func(r 
*apinetmap.NetmapSnapshotResponse) { + r.VerifyHeader.MetaSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apinetmap.NetmapSnapshotResponse) { + r.VerifyHeader.OriginSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + } { + srv := srv + srv.corruptRespSig = testCase.corrupt + assertErr := func(err error) { + require.ErrorContains(t, err, errResponseSignature, [2]any{i, testCase}) + require.ErrorContains(t, err, testCase.err, [2]any{i, testCase}) + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetCurrentNetmap(ctx, GetCurrentNetmapOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + } + }) + t.Run("invalid response status", func(t *testing.T) { + srv := srv + srv.modifyResp = func(r *apinetmap.NetmapSnapshotResponse) { + r.MetaHeader.Status = &status.Status{Code: status.InternalServerError, Details: make([]*status.Status_Detail, 1)} + } + assertErr := func(err error) { + require.ErrorContains(t, err, errInvalidResponseStatus) + require.ErrorContains(t, err, "details attached but not supported") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetCurrentNetmap(ctx, GetCurrentNetmapOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("status errors", func(t *testing.T) { + for _, testCase := range []struct { + code uint32 + errConst error + errVar any + }{ + {code: 1 << 32 / 2}, + {code: status.InternalServerError, errConst: apistatus.ErrServerInternal, errVar: new(apistatus.InternalServerError)}, + {code: status.SignatureVerificationFail, errConst: apistatus.ErrSignatureVerification, errVar: new(apistatus.SignatureVerificationFailure)}, + } { + srv := srv + srv.modifyResp = func(r *apinetmap.NetmapSnapshotResponse) { + r.MetaHeader.Status = &status.Status{Code: testCase.code, Message: "any message"} + } + assertErr 
:= func(err error) { + require.ErrorIs(t, err, apistatus.Error, testCase) + require.ErrorContains(t, err, "any message", testCase) + if testCase.errConst != nil { + require.ErrorIs(t, err, testCase.errConst, testCase) + } + if testCase.errVar != nil { + require.ErrorAs(t, err, testCase.errVar, testCase) + } + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetCurrentNetmap(ctx, GetCurrentNetmapOptions{}) + assertErr(err) + require.True(t, *handlerCalled, testCase) + } + }) + t.Run("response body", func(t *testing.T) { + t.Run("missing", func(t *testing.T) { + srv := srv + assertErr := func(err error) { require.EqualError(t, err, "invalid response: missing body") } + c, handlerCalled := dial(t, &srv, assertErr) + srv.modifyResp = func(r *apinetmap.NetmapSnapshotResponse) { r.Body = nil } + _, err := c.GetCurrentNetmap(ctx, GetCurrentNetmapOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("missing network map", func(t *testing.T) { + srv := srv + srv.modifyResp = func(r *apinetmap.NetmapSnapshotResponse) { r.Body.Netmap = nil } + assertErr := func(err error) { + require.EqualError(t, err, "invalid response: invalid body: missing required field (network map)") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetCurrentNetmap(ctx, GetCurrentNetmapOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("invalid network map", func(t *testing.T) { + t.Run("invalid node", func(t *testing.T) { + for _, testCase := range []struct { + name string + err string + contains bool + corrupt func(*apinetmap.NodeInfo) + }{ + {name: "nil public key", err: "missing public key", + corrupt: func(n *apinetmap.NodeInfo) { n.PublicKey = nil }}, + {name: "empty public key", err: "missing public key", + corrupt: func(n *apinetmap.NodeInfo) { n.PublicKey = nil }}, + {name: "nil addresses", err: "missing network endpoints", + corrupt: func(n *apinetmap.NodeInfo) { n.Addresses = nil }}, + {name: "empty addresses", err: 
"missing network endpoints", + corrupt: func(n *apinetmap.NodeInfo) { n.Addresses = []string{} }}, + {name: "empty address", err: "empty network endpoint #1", + corrupt: func(n *apinetmap.NodeInfo) { n.Addresses = []string{"any", "", "any"} }}, + {name: "attributes/missing key", err: "invalid attribute #1: missing key", + corrupt: func(n *apinetmap.NodeInfo) { + n.Attributes = []*apinetmap.NodeInfo_Attribute{ + {Key: "key_valid", Value: "any"}, + {Key: "", Value: "any"}, + } + }}, + {name: "attributes/repeated keys", err: "multiple attributes with key=k2", + corrupt: func(n *apinetmap.NodeInfo) { + n.Attributes = []*apinetmap.NodeInfo_Attribute{ + {Key: "k1", Value: "any"}, + {Key: "k2", Value: "1"}, + {Key: "k3", Value: "any"}, + {Key: "k2", Value: "2"}, + } + }}, + {name: "attributes/missing value", err: "invalid attribute #1 (key2): missing value", + corrupt: func(n *apinetmap.NodeInfo) { + n.Attributes = []*apinetmap.NodeInfo_Attribute{ + {Key: "key1", Value: "any"}, + {Key: "key2", Value: ""}, + } + }}, + {name: "attributes/price", err: "invalid price attribute (#1): invalid integer", contains: true, + corrupt: func(n *apinetmap.NodeInfo) { + n.Attributes = []*apinetmap.NodeInfo_Attribute{ + {Key: "any", Value: "any"}, + {Key: "Price", Value: "not_a_number"}, + } + }}, + {name: "attributes/capacity", err: "invalid capacity attribute (#1): invalid integer", contains: true, + corrupt: func(n *apinetmap.NodeInfo) { + n.Attributes = []*apinetmap.NodeInfo_Attribute{ + {Key: "any", Value: "any"}, + {Key: "Capacity", Value: "not_a_number"}, + } + }}, + } { + srv := srv + srv.modifyResp = func(r *apinetmap.NetmapSnapshotResponse) { + r.Body.Netmap.Nodes = make([]*apinetmap.NodeInfo, 2) + for i := range r.Body.Netmap.Nodes { + r.Body.Netmap.Nodes[i] = new(apinetmap.NodeInfo) + netmaptest.NodeInfo().WriteToV2(r.Body.Netmap.Nodes[i]) + } + testCase.corrupt(r.Body.Netmap.Nodes[1]) + } + assertErr := func(err error) { + if testCase.contains { + require.ErrorContains(t, 
err, fmt.Sprintf("invalid response: invalid body: invalid field (network map): invalid node info #1: %s", testCase.err)) + } else { + require.EqualError(t, err, fmt.Sprintf("invalid response: invalid body: invalid field (network map): invalid node info #1: %s", testCase.err)) + } + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetCurrentNetmap(ctx, GetCurrentNetmapOptions{}) + assertErr(err) + require.True(t, *handlerCalled, testCase) + } + }) + }) + }) + }) + t.Run("response info handler", func(t *testing.T) { + t.Run("OK", func(t *testing.T) { + srv := srv + srv.epoch = 3598503 + assertErr := func(err error) { require.NoError(t, err) } + respHandlerCalled := false + c, reqHandlerCalled := _dial(t, &srv, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + respHandlerCalled = true + require.EqualValues(t, 3598503, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return nil + }) + }) + _, err := c.GetCurrentNetmap(ctx, GetCurrentNetmapOptions{}) + assertErr(err) + require.True(t, respHandlerCalled) + require.True(t, *reqHandlerCalled) + }) + t.Run("fail", func(t *testing.T) { + srv := srv + srv.epoch = 4386380643 + assertErr := func(err error) { require.ErrorContains(t, err, "intercept response info: some handler error") } + respHandlerCalled := false + c, reqHandlerCalled := _dial(t, &srv, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + if !respHandlerCalled { // dial + respHandlerCalled = true + return nil + } + require.EqualValues(t, 4386380643, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return errors.New("some handler error") + }) + }) + _, err := c.GetCurrentNetmap(ctx, GetCurrentNetmapOptions{}) + assertErr(err) + require.True(t, respHandlerCalled) + require.True(t, *reqHandlerCalled) + }) + }) } diff 
--git a/client/object_delete.go b/client/object_delete.go index 51490a962..a99856e22 100644 --- a/client/object_delete.go +++ b/client/object_delete.go @@ -4,133 +4,159 @@ import ( "context" "errors" "fmt" + "time" - "github.com/nspcc-dev/neofs-api-go/v2/acl" - v2object "github.com/nspcc-dev/neofs-api-go/v2/object" - v2refs "github.com/nspcc-dev/neofs-api-go/v2/refs" - rpcapi "github.com/nspcc-dev/neofs-api-go/v2/rpc" - "github.com/nspcc-dev/neofs-api-go/v2/rpc/client" + apiacl "github.com/nspcc-dev/neofs-sdk-go/api/acl" + apiobject "github.com/nspcc-dev/neofs-sdk-go/api/object" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + apisession "github.com/nspcc-dev/neofs-sdk-go/api/session" "github.com/nspcc-dev/neofs-sdk-go/bearer" + apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" cid "github.com/nspcc-dev/neofs-sdk-go/container/id" + neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" oid "github.com/nspcc-dev/neofs-sdk-go/object/id" + "github.com/nspcc-dev/neofs-sdk-go/session" "github.com/nspcc-dev/neofs-sdk-go/stat" - "github.com/nspcc-dev/neofs-sdk-go/user" ) -var ( - // special variable for test purposes only, to overwrite real RPC calls. - rpcAPIDeleteObject = rpcapi.DeleteObject +// DeleteObjectOptions groups optional parameters of [Client.DeleteObject]. +type DeleteObjectOptions struct { + sessionSet bool + session session.Object - // ErrNoSession indicates that session wasn't set in some Prm* structure. - ErrNoSession = errors.New("session is not set") -) - -// PrmObjectDelete groups optional parameters of ObjectDelete operation. -type PrmObjectDelete struct { - sessionContainer + bearerTokenSet bool + bearerToken bearer.Token } -// WithBearerToken attaches bearer token to be used for the operation. +// WithinSession specifies token of the session preliminary opened with the +// remote server. Session tokens grants user-to-user power of attorney: remote +// server still creates tombstone objects, but they are owned by the session +// issuer. 
Session must include [session.VerbObjectDelete] action. The token +// must be signed by the user passed to [Client.DeleteObject]. // -// If set, underlying eACL rules will be used in access control. +// With session, [Client.DeleteObject] can also return +// [apistatus.ErrSessionTokenNotFound] if the session is missing on the server +// or [apistatus.ErrSessionTokenExpired] if it has expired: this usually +// requires re-issuing the session. // -// Must be signed. -func (x *PrmObjectDelete) WithBearerToken(t bearer.Token) { - var v2token acl.BearerToken - t.WriteToV2(&v2token) - x.meta.SetBearerToken(&v2token) +// To start a session, use [Client.StartSession]. +func (x *DeleteObjectOptions) WithinSession(s session.Object) { + x.session, x.sessionSet = s, true } -// WithXHeaders specifies list of extended headers (string key-value pairs) -// to be attached to the request. Must have an even length. -// -// Slice must not be mutated until the operation completes. -func (x *PrmObjectDelete) WithXHeaders(hs ...string) { - writeXHeadersToMeta(hs, &x.meta) +// WithBearerToken attaches bearer token carrying extended ACL rules that +// replace eACL of the object's container. The token must be issued by the +// container owner and target the subject authenticated by signer passed to +// [Client.DeleteObject]. In practice, bearer token makes sense only if it +// grants deletion rights to the subject. +func (x *DeleteObjectOptions) WithBearerToken(t bearer.Token) { + x.bearerToken, x.bearerTokenSet = t, true } -// ObjectDelete marks an object for deletion from the container using NeoFS API protocol. -// As a marker, a special unit called a tombstone is placed in the container. -// It confirms the user's intent to delete the object, and is itself a container object. -// Explicit deletion is done asynchronously, and is generally not guaranteed. -// -// Any client's internal or transport errors are returned as `error`, -// see [apistatus] package for NeoFS-specific error types. 
+// DeleteObject sends request to remove the referenced object. If the request is +// accepted, a special marker called a tombstone is created by the remote server +// and placed in the container. The tombstone confirms the user's intent to +// delete the object, and is itself a system server-owned object in the +// container: DeleteObject returns its ID. The tombstone has limited lifetime +// depending on the server configuration. Explicit deletion is done +// asynchronously, and is generally not guaranteed. Created tombstone is owned +// by specified user. // -// Context is required and must not be nil. It is used for network communication. -// -// Signer is required and must not be nil. The operation is executed on behalf of -// the account corresponding to the specified Signer, which is taken into account, in particular, for access control. -// -// Return errors: -// - global (see Client docs) -// - [ErrMissingSigner] -// - [apistatus.ErrContainerNotFound] -// - [apistatus.ErrObjectAccessDenied] -// - [apistatus.ErrObjectLocked] -// - [apistatus.ErrSessionTokenExpired] -func (c *Client) ObjectDelete(ctx context.Context, containerID cid.ID, objectID oid.ID, signer user.Signer, prm PrmObjectDelete) (oid.ID, error) { - var ( - addr v2refs.Address - cidV2 v2refs.ContainerID - oidV2 v2refs.ObjectID - body v2object.DeleteRequestBody - err error - ) - - defer func() { - c.sendStatistic(stat.MethodObjectDelete, err)() - }() - - containerID.WriteToV2(&cidV2) - addr.SetContainerID(&cidV2) - - objectID.WriteToV2(&oidV2) - addr.SetObjectID(&oidV2) - +// DeleteObject returns: +// - [apistatus.ErrContainerNotFound] if referenced container is missing +// - [apistatus.ErrObjectAccessDenied] if signer has no access to remove the object +// - [apistatus.ErrObjectLocked] if referenced objects is locked (meaning +// protection from the removal while the lock is active) +func (c *Client) DeleteObject(ctx context.Context, cnr cid.ID, obj oid.ID, signer neofscrypto.Signer, opts 
DeleteObjectOptions) (oid.ID, error) { + var res oid.ID if signer == nil { - return oid.ID{}, ErrMissingSigner + return res, errMissingSigner } - // form request body - body.SetAddress(&addr) + var err error + if c.handleAPIOpResult != nil { + defer func(start time.Time) { + c.handleAPIOpResult(c.serverPubKey, c.endpoint, stat.MethodObjectDelete, time.Since(start), err) + }(time.Now()) + } // form request - var req v2object.DeleteRequest - req.SetBody(&body) - c.prepareRequest(&req, &prm.meta) - - buf := c.buffers.Get().(*[]byte) - err = signServiceMessage(signer, &req, *buf) - c.buffers.Put(buf) - if err != nil { - err = fmt.Errorf("sign request: %w", err) - return oid.ID{}, err + req := &apiobject.DeleteRequest{ + Body: &apiobject.DeleteRequest_Body{ + Address: &refs.Address{ + ContainerId: new(refs.ContainerID), + ObjectId: new(refs.ObjectID), + }, + }, + MetaHeader: &apisession.RequestMetaHeader{Ttl: 2}, + } + cnr.WriteToV2(req.Body.Address.ContainerId) + obj.WriteToV2(req.Body.Address.ObjectId) + if opts.sessionSet { + req.MetaHeader.SessionToken = new(apisession.SessionToken) + opts.session.WriteToV2(req.MetaHeader.SessionToken) + } + if opts.bearerTokenSet { + req.MetaHeader.BearerToken = new(apiacl.BearerToken) + opts.bearerToken.WriteToV2(req.MetaHeader.BearerToken) + } + // FIXME: balance requests need small fixed-size buffers for encoding, its makes + // no sense to mosh them with other buffers + buf := c.signBuffers.Get().(*[]byte) + defer c.signBuffers.Put(buf) + if req.VerifyHeader, err = neofscrypto.SignRequest(signer, req, req.Body, *buf); err != nil { + err = fmt.Errorf("%s: %w", errSignRequest, err) // for closure above + return res, err } - resp, err := rpcAPIDeleteObject(&c.c, &req, client.WithContext(ctx)) + // send request + resp, err := c.transport.object.Delete(ctx, req) if err != nil { - return oid.ID{}, err + err = fmt.Errorf("%s: %w", errTransport, err) // for closure above + return res, err } - var res oid.ID - if err = 
c.processResponse(resp); err != nil { - return oid.ID{}, err + // intercept response info + if c.interceptAPIRespInfo != nil { + if err = c.interceptAPIRespInfo(ResponseMetaInfo{ + key: resp.GetVerifyHeader().GetBodySignature().GetKey(), + epoch: resp.GetMetaHeader().GetEpoch(), + }); err != nil { + err = fmt.Errorf("%s: %w", errInterceptResponseInfo, err) // for closure above + return res, err + } } - const fieldTombstone = "tombstone" - - idTombV2 := resp.GetBody().GetTombstone().GetObjectID() - if idTombV2 == nil { - err = newErrMissingResponseField(fieldTombstone) - return oid.ID{}, err + // verify response integrity + if err = neofscrypto.VerifyResponse(resp, resp.Body); err != nil { + err = fmt.Errorf("%s: %w", errResponseSignature, err) // for closure above + return res, err } - - err = res.ReadFromV2(*idTombV2) + sts, err := apistatus.ErrorFromV2(resp.GetMetaHeader().GetStatus()) if err != nil { - err = newErrInvalidResponseField(fieldTombstone, err) - return oid.ID{}, err + err = fmt.Errorf("%s: %w", errInvalidResponseStatus, err) // for closure above + return res, err + } + if sts != nil { + err = sts // for closure above + return res, err } + // decode response payload + if resp.Body == nil { + err = errors.New(errMissingResponseBody) // for closure above + return res, err + } + const fieldTombstone = "tombstone" + if resp.Body.Tombstone == nil { + err = fmt.Errorf("%s (%s)", errMissingResponseBodyField, fieldTombstone) // for closure above + return res, err + } else if resp.Body.Tombstone.ObjectId == nil { + err = fmt.Errorf("%s (%s): missing ID field", errInvalidResponseBodyField, fieldTombstone) // for closure above + return res, err + } else if err = res.ReadFromV2(resp.Body.Tombstone.ObjectId); err != nil { + err = fmt.Errorf("%s (%s): invalid ID: %w", errInvalidResponseBodyField, fieldTombstone, err) // for closure above + return res, err + } return res, nil } diff --git a/client/object_delete_test.go b/client/object_delete_test.go index 
cde6a380e..787b1b050 100644 --- a/client/object_delete_test.go +++ b/client/object_delete_test.go @@ -1,19 +1,476 @@ package client import ( + "bytes" "context" + "errors" + "fmt" + "net" "testing" + "time" + apinetmap "github.com/nspcc-dev/neofs-sdk-go/api/netmap" + apiobject "github.com/nspcc-dev/neofs-sdk-go/api/object" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + apisession "github.com/nspcc-dev/neofs-sdk-go/api/session" + "github.com/nspcc-dev/neofs-sdk-go/api/status" + "github.com/nspcc-dev/neofs-sdk-go/bearer" + bearertest "github.com/nspcc-dev/neofs-sdk-go/bearer/test" + apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" cid "github.com/nspcc-dev/neofs-sdk-go/container/id" + cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" + neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" + neofscryptotest "github.com/nspcc-dev/neofs-sdk-go/crypto/test" + netmaptest "github.com/nspcc-dev/neofs-sdk-go/netmap/test" oid "github.com/nspcc-dev/neofs-sdk-go/object/id" + oidtest "github.com/nspcc-dev/neofs-sdk-go/object/id/test" + "github.com/nspcc-dev/neofs-sdk-go/session" + sessiontest "github.com/nspcc-dev/neofs-sdk-go/session/test" + "github.com/nspcc-dev/neofs-sdk-go/stat" + usertest "github.com/nspcc-dev/neofs-sdk-go/user/test" + versiontest "github.com/nspcc-dev/neofs-sdk-go/version/test" "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/test/bufconn" ) -func TestClient_ObjectDelete(t *testing.T) { - t.Run("missing signer", func(t *testing.T) { - c := newClient(t, nil) +type deleteObjectServer struct { + noOtherObjectCalls + // client + cnr cid.ID + obj oid.ID + clientSigner neofscrypto.Signer + session *session.Object + bearerToken *bearer.Token + // server + sleepDur time.Duration + endpointInfoOnDialServer + tomb oid.ID + errTransport error + modifyResp func(*apiobject.DeleteResponse) + corruptRespSig func(*apiobject.DeleteResponse) +} + +func (x deleteObjectServer) Delete(ctx context.Context, req 
*apiobject.DeleteRequest) (*apiobject.DeleteResponse, error) { + if x.sleepDur > 0 { + time.Sleep(x.sleepDur) + } + if x.errTransport != nil { + return nil, x.errTransport + } + var sts status.Status + resp := apiobject.DeleteResponse{ + MetaHeader: &apisession.ResponseMetaHeader{Status: &sts, Epoch: x.epoch}, + } + var err error + var cnr cid.ID + var obj oid.ID + sigScheme := refs.SignatureScheme(x.clientSigner.Scheme()) + creatorPubKey := neofscrypto.PublicKeyBytes(x.clientSigner.Public()) + if ctx == nil { + sts.Code, sts.Message = status.InternalServerError, "nil context" + } else if req == nil { + sts.Code, sts.Message = status.InternalServerError, "nil request" + } else if err = neofscrypto.VerifyRequest(req, req.Body); err != nil { + sts.Code, sts.Message = status.SignatureVerificationFail, err.Error() + } else if req.VerifyHeader.BodySignature.Scheme != sigScheme || + !bytes.Equal(req.VerifyHeader.BodySignature.Key, creatorPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request body signature credentials" + } else if req.VerifyHeader.MetaSignature.Scheme != sigScheme || + !bytes.Equal(req.VerifyHeader.MetaSignature.Key, creatorPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request meta header signature credentials" + } else if req.VerifyHeader.OriginSignature.Scheme != sigScheme || + !bytes.Equal(req.VerifyHeader.OriginSignature.Key, creatorPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected origin request verification header signature credentials" + } else if req.MetaHeader == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: missing meta header" + } else if req.MetaHeader.Ttl != 2 { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request: invalid TTL %d, expected 2", req.MetaHeader.Ttl) + } else if req.Body == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: missing body" 
+ } else if req.Body.Address == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: invalid body: missing address" + } else if req.Body.Address.ObjectId == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: invalid body: invalid address: missing ID" + } else if err = obj.ReadFromV2(req.Body.Address.ObjectId); err != nil { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request: invalid body: invalid address: invalid ID: %s", err) + } else if obj != x.obj { + sts.Code, sts.Message = status.InternalServerError, "[test] wrong ID" + } else if req.Body.Address.ContainerId == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: invalid body: invalid address: missing container" + } else if err = cnr.ReadFromV2(req.Body.Address.ContainerId); err != nil { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request: invalid body: invalid address: invalid container: %s", err) + } else if cnr != x.cnr { + sts.Code, sts.Message = status.InternalServerError, "[test] wrong container" + } + if sts.Code == 0 && x.session != nil { + var so session.Object + if req.MetaHeader.SessionToken == nil { + sts.Code, sts.Message = status.InternalServerError, "[test] missing session token" + } else if err = so.ReadFromV2(req.MetaHeader.SessionToken); err != nil { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request: invalid meta header: invalid session token: %v", err) + } else if !bytes.Equal(so.Marshal(), x.session.Marshal()) { + sts.Code, sts.Message = status.InternalServerError, "[test] session token in request differs with the input one" + } + } + if sts.Code == 0 && x.bearerToken != nil { + var bt bearer.Token + if req.MetaHeader.BearerToken == nil { + sts.Code, sts.Message = status.InternalServerError, "[test] missing bearer token" + } else if err = bt.ReadFromV2(req.MetaHeader.BearerToken); err != nil { + sts.Code, sts.Message = 
status.InternalServerError, fmt.Sprintf("invalid request: invalid meta header: invalid bearer token: %v", err) + } else if !bytes.Equal(bt.Marshal(), x.bearerToken.Marshal()) { + sts.Code, sts.Message = status.InternalServerError, "[test] bearer token in request differs with the input one" + } + } + if sts.Code == 0 { + resp.MetaHeader.Status = nil + resp.Body = &apiobject.DeleteResponse_Body{Tombstone: &refs.Address{ObjectId: new(refs.ObjectID)}} + x.tomb.WriteToV2(resp.Body.Tombstone.ObjectId) + } + if x.modifyResp != nil { + x.modifyResp(&resp) + } + resp.VerifyHeader, err = neofscrypto.SignResponse(x.serverSigner, &resp, resp.Body, nil) + if err != nil { + return nil, fmt.Errorf("sign response: %w", err) + } + if x.corruptRespSig != nil { + x.corruptRespSig(&resp) + } + return &resp, nil +} + +func TestClient_DeleteObject(t *testing.T) { + ctx := context.Background() + var srv deleteObjectServer + srv.sleepDur = 10 * time.Millisecond + srv.serverSigner = neofscryptotest.RandomSigner() + srv.latestVersion = versiontest.Version() + srv.nodeInfo = netmaptest.NodeInfo() + srv.nodeInfo.SetPublicKey(neofscrypto.PublicKeyBytes(srv.serverSigner.Public())) + usr, _ := usertest.TwoUsers() + srv.clientSigner = usr + srv.cnr = cidtest.ID() + srv.obj = oidtest.ID() + srv.tomb = oidtest.ID() + _dial := func(t testing.TB, srv *deleteObjectServer, assertErr func(error), customizeOpts func(*Options)) (*Client, *bool) { + var opts Options + var handlerCalled bool + opts.SetAPIRequestResultHandler(func(nodeKey []byte, endpoint string, op stat.Method, dur time.Duration, err error) { + handlerCalled = true + require.Equal(t, srv.nodeInfo.PublicKey(), nodeKey) + require.Equal(t, "localhost:8080", endpoint) + require.Equal(t, stat.MethodObjectDelete, op) + require.Greater(t, dur, srv.sleepDur) + assertErr(err) + }) + if customizeOpts != nil { + customizeOpts(&opts) + } + + c, err := New(anyValidURI, opts) + require.NoError(t, err) - _, err := c.ObjectDelete(context.Background(), 
cid.ID{}, oid.ID{}, nil, PrmObjectDelete{}) - require.ErrorIs(t, err, ErrMissingSigner) + conn := bufconn.Listen(10 << 10) + gs := grpc.NewServer() + apinetmap.RegisterNetmapServiceServer(gs, srv) + apiobject.RegisterObjectServiceServer(gs, srv) + go func() { _ = gs.Serve(conn) }() + t.Cleanup(gs.Stop) + + c.dial = func(ctx context.Context, _ string) (net.Conn, error) { return conn.DialContext(ctx) } + require.NoError(t, c.Dial(ctx)) + + return c, &handlerCalled + } + dial := func(t testing.TB, srv *deleteObjectServer, assertErr func(error)) (*Client, *bool) { + return _dial(t, srv, assertErr, nil) + } + t.Run("invalid signer", func(t *testing.T) { + c, err := New(anyValidURI, Options{}) + require.NoError(t, err) + _, err = c.DeleteObject(ctx, srv.cnr, srv.obj, nil, DeleteObjectOptions{}) + require.ErrorIs(t, err, errMissingSigner) + }) + t.Run("OK", func(t *testing.T) { + srv := srv + assertErr := func(err error) { require.NoError(t, err) } + c, handlerCalled := dial(t, &srv, assertErr) + res, err := c.DeleteObject(ctx, srv.cnr, srv.obj, srv.clientSigner, DeleteObjectOptions{}) + assertErr(err) + require.Equal(t, srv.tomb, res) + require.True(t, *handlerCalled) + t.Run("with session", func(t *testing.T) { + srv := srv + so := sessiontest.Object() + srv.session = &so + assertErr := func(err error) { require.NoError(t, err) } + c, handlerCalled := dial(t, &srv, assertErr) + var opts DeleteObjectOptions + opts.WithinSession(so) + res, err := c.DeleteObject(ctx, srv.cnr, srv.obj, srv.clientSigner, opts) + assertErr(err) + require.Equal(t, srv.tomb, res) + require.True(t, *handlerCalled) + }) + t.Run("with bearer token", func(t *testing.T) { + srv := srv + bt := bearertest.Token() + srv.bearerToken = &bt + assertErr := func(err error) { require.NoError(t, err) } + c, handlerCalled := dial(t, &srv, assertErr) + var opts DeleteObjectOptions + opts.WithBearerToken(bt) + res, err := c.DeleteObject(ctx, srv.cnr, srv.obj, srv.clientSigner, opts) + assertErr(err) + 
require.Equal(t, srv.tomb, res) + require.True(t, *handlerCalled) + }) + }) + t.Run("fail", func(t *testing.T) { + t.Run("sign request", func(t *testing.T) { + srv := srv + srv.sleepDur = 0 + assertErr := func(err error) { require.ErrorContains(t, err, errSignRequest) } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.DeleteObject(ctx, srv.cnr, srv.obj, neofscryptotest.FailSigner(srv.clientSigner), DeleteObjectOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("transport", func(t *testing.T) { + srv := srv + srv.errTransport = errors.New("any transport failure") + assertErr := func(err error) { + require.ErrorContains(t, err, errTransport) + require.ErrorContains(t, err, "any transport failure") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.DeleteObject(ctx, srv.cnr, srv.obj, srv.clientSigner, DeleteObjectOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("invalid response signature", func(t *testing.T) { + for i, testCase := range []struct { + err string + corrupt func(*apiobject.DeleteResponse) + }{ + {err: "missing verification header", + corrupt: func(r *apiobject.DeleteResponse) { r.VerifyHeader = nil }, + }, + {err: "missing body signature", + corrupt: func(r *apiobject.DeleteResponse) { r.VerifyHeader.BodySignature = nil }, + }, + {err: "missing signature of the meta header", + corrupt: func(r *apiobject.DeleteResponse) { r.VerifyHeader.MetaSignature = nil }, + }, + {err: "missing signature of the origin verification header", + corrupt: func(r *apiobject.DeleteResponse) { r.VerifyHeader.OriginSignature = nil }, + }, + {err: "verify body signature: missing public key", + corrupt: func(r *apiobject.DeleteResponse) { r.VerifyHeader.BodySignature.Key = nil }, + }, + {err: "verify signature of the meta header: missing public key", + corrupt: func(r *apiobject.DeleteResponse) { r.VerifyHeader.MetaSignature.Key = nil }, + }, + {err: "verify signature of the origin verification header: 
missing public key", + corrupt: func(r *apiobject.DeleteResponse) { r.VerifyHeader.OriginSignature.Key = nil }, + }, + {err: "verify body signature: decode public key from binary", + corrupt: func(r *apiobject.DeleteResponse) { + r.VerifyHeader.BodySignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the meta header: decode public key from binary", + corrupt: func(r *apiobject.DeleteResponse) { + r.VerifyHeader.MetaSignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the origin verification header: decode public key from binary", + corrupt: func(r *apiobject.DeleteResponse) { + r.VerifyHeader.OriginSignature.Key = []byte("not a public key") + }, + }, + {err: "verify body signature: invalid scheme -1", + corrupt: func(r *apiobject.DeleteResponse) { r.VerifyHeader.BodySignature.Scheme = -1 }, + }, + {err: "verify body signature: unsupported scheme 3", + corrupt: func(r *apiobject.DeleteResponse) { r.VerifyHeader.BodySignature.Scheme = 3 }, + }, + {err: "verify signature of the meta header: unsupported scheme 3", + corrupt: func(r *apiobject.DeleteResponse) { r.VerifyHeader.MetaSignature.Scheme = 3 }, + }, + {err: "verify signature of the origin verification header: unsupported scheme 3", + corrupt: func(r *apiobject.DeleteResponse) { r.VerifyHeader.OriginSignature.Scheme = 3 }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apiobject.DeleteResponse) { r.VerifyHeader.BodySignature.Sign[0]++ }, + }, + {err: "verify signature of the meta header: signature mismatch", + corrupt: func(r *apiobject.DeleteResponse) { r.VerifyHeader.MetaSignature.Sign[0]++ }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apiobject.DeleteResponse) { r.VerifyHeader.OriginSignature.Sign[0]++ }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apiobject.DeleteResponse) { + r.VerifyHeader.BodySignature.Key = 
neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the meta header: signature mismatch", + corrupt: func(r *apiobject.DeleteResponse) { + r.VerifyHeader.MetaSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apiobject.DeleteResponse) { + r.VerifyHeader.OriginSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + } { + srv := srv + srv.corruptRespSig = testCase.corrupt + assertErr := func(err error) { + require.ErrorContains(t, err, errResponseSignature, [2]any{i, testCase}) + require.ErrorContains(t, err, testCase.err, [2]any{i, testCase}) + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.DeleteObject(ctx, srv.cnr, srv.obj, srv.clientSigner, DeleteObjectOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + } + }) + t.Run("invalid response status", func(t *testing.T) { + srv := srv + srv.modifyResp = func(r *apiobject.DeleteResponse) { + r.MetaHeader.Status = &status.Status{Code: status.InternalServerError, Details: make([]*status.Status_Detail, 1)} + } + assertErr := func(err error) { + require.ErrorContains(t, err, errInvalidResponseStatus) + require.ErrorContains(t, err, "details attached but not supported") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.DeleteObject(ctx, srv.cnr, srv.obj, srv.clientSigner, DeleteObjectOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("status errors", func(t *testing.T) { + for _, testCase := range []struct { + code uint32 + errConst error + errVar any + }{ + {code: 1 << 32 / 2}, + {code: status.InternalServerError, errConst: apistatus.ErrServerInternal, errVar: new(apistatus.InternalServerError)}, + {code: status.SignatureVerificationFail, errConst: apistatus.ErrSignatureVerification, errVar: 
new(apistatus.SignatureVerificationFailure)}, + {code: status.ContainerNotFound, errConst: apistatus.ErrContainerNotFound, errVar: new(apistatus.ContainerNotFound)}, + {code: status.ObjectAccessDenied, errConst: apistatus.ErrObjectAccessDenied, errVar: new(apistatus.ObjectAccessDenied)}, + {code: status.ObjectLocked, errConst: apistatus.ErrObjectLocked, errVar: new(apistatus.ObjectLocked)}, + {code: status.SessionTokenNotFound, errConst: apistatus.ErrSessionTokenNotFound, errVar: new(apistatus.SessionTokenNotFound)}, + {code: status.SessionTokenExpired, errConst: apistatus.ErrSessionTokenExpired, errVar: new(apistatus.SessionTokenExpired)}, + } { + srv := srv + srv.modifyResp = func(r *apiobject.DeleteResponse) { + r.MetaHeader.Status = &status.Status{Code: testCase.code, Message: "any message"} + } + assertErr := func(err error) { + require.ErrorIs(t, err, apistatus.Error, testCase) + require.ErrorContains(t, err, "any message", testCase) + if testCase.errConst != nil { + require.ErrorIs(t, err, testCase.errConst, testCase) + } + if testCase.errVar != nil { + require.ErrorAs(t, err, testCase.errVar, testCase) + } + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.DeleteObject(ctx, srv.cnr, srv.obj, srv.clientSigner, DeleteObjectOptions{}) + assertErr(err) + require.True(t, *handlerCalled, testCase) + } + }) + t.Run("response body", func(t *testing.T) { + t.Run("missing", func(t *testing.T) { + srv := srv + assertErr := func(err error) { require.EqualError(t, err, "invalid response: missing body") } + c, handlerCalled := dial(t, &srv, assertErr) + srv.modifyResp = func(r *apiobject.DeleteResponse) { r.Body = nil } + _, err := c.DeleteObject(ctx, srv.cnr, srv.obj, srv.clientSigner, DeleteObjectOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("missing tombstone", func(t *testing.T) { + srv := srv + srv.modifyResp = func(r *apiobject.DeleteResponse) { r.Body.Tombstone = nil } + assertErr := func(err error) { + 
require.EqualError(t, err, "invalid response: invalid body: missing required field (tombstone)") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.DeleteObject(ctx, srv.cnr, srv.obj, srv.clientSigner, DeleteObjectOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("invalid tombstone", func(t *testing.T) { + for _, testCase := range []struct { + err string + corrupt func(*refs.Address) + }{ + {err: "missing ID field", corrupt: func(a *refs.Address) { a.ObjectId = nil }}, + {err: "invalid ID: missing value field", corrupt: func(a *refs.Address) { a.ObjectId.Value = nil }}, + {err: "invalid ID: invalid value length 31", corrupt: func(a *refs.Address) { a.ObjectId.Value = make([]byte, 31) }}, + } { + srv := srv + srv.modifyResp = func(r *apiobject.DeleteResponse) { testCase.corrupt(r.Body.Tombstone) } + assertErr := func(err error) { + require.EqualError(t, err, fmt.Sprintf("invalid response: invalid body: invalid field (tombstone): %s", testCase.err)) + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.DeleteObject(ctx, srv.cnr, srv.obj, srv.clientSigner, DeleteObjectOptions{}) + assertErr(err) + require.True(t, *handlerCalled, testCase) + } + }) + }) + }) + t.Run("response info handler", func(t *testing.T) { + t.Run("OK", func(t *testing.T) { + srv := srv + srv.epoch = 3598503 + assertErr := func(err error) { require.NoError(t, err) } + respHandlerCalled := false + c, reqHandlerCalled := _dial(t, &srv, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + respHandlerCalled = true + require.EqualValues(t, 3598503, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return nil + }) + }) + _, err := c.DeleteObject(ctx, srv.cnr, srv.obj, srv.clientSigner, DeleteObjectOptions{}) + assertErr(err) + require.True(t, respHandlerCalled) + require.True(t, *reqHandlerCalled) + }) + t.Run("fail", func(t *testing.T) { + 
srv := srv + srv.epoch = 4386380643 + assertErr := func(err error) { require.ErrorContains(t, err, "intercept response info: some handler error") } + respHandlerCalled := false + c, reqHandlerCalled := _dial(t, &srv, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + if !respHandlerCalled { // dial + respHandlerCalled = true + return nil + } + require.EqualValues(t, 4386380643, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return errors.New("some handler error") + }) + }) + _, err := c.DeleteObject(ctx, srv.cnr, srv.obj, srv.clientSigner, DeleteObjectOptions{}) + assertErr(err) + require.True(t, respHandlerCalled) + require.True(t, *reqHandlerCalled) + }) }) } diff --git a/client/object_get.go b/client/object_get.go index 18633d7bc..6a82851e0 100644 --- a/client/object_get.go +++ b/client/object_get.go @@ -4,645 +4,734 @@ import ( "context" "errors" "fmt" - "io" + "time" - "github.com/nspcc-dev/neofs-api-go/v2/acl" - v2object "github.com/nspcc-dev/neofs-api-go/v2/object" - v2refs "github.com/nspcc-dev/neofs-api-go/v2/refs" - rpcapi "github.com/nspcc-dev/neofs-api-go/v2/rpc" - "github.com/nspcc-dev/neofs-api-go/v2/rpc/client" + apiacl "github.com/nspcc-dev/neofs-sdk-go/api/acl" + apiobject "github.com/nspcc-dev/neofs-sdk-go/api/object" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + apisession "github.com/nspcc-dev/neofs-sdk-go/api/session" "github.com/nspcc-dev/neofs-sdk-go/bearer" + apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" cid "github.com/nspcc-dev/neofs-sdk-go/container/id" + neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" "github.com/nspcc-dev/neofs-sdk-go/object" oid "github.com/nspcc-dev/neofs-sdk-go/object/id" + "github.com/nspcc-dev/neofs-sdk-go/session" "github.com/nspcc-dev/neofs-sdk-go/stat" - "github.com/nspcc-dev/neofs-sdk-go/user" ) -var ( - // special variables for test purposes only, to overwrite real 
RPC calls. - rpcAPIGetObject = rpcapi.GetObject - rpcAPIHeadObject = rpcapi.HeadObject - rpcAPIGetObjectRange = rpcapi.GetObjectRange -) - -// shared parameters of GET/HEAD/RANGE. -type prmObjectRead struct { - sessionContainer - - raw bool -} - -// WithXHeaders specifies list of extended headers (string key-value pairs) -// to be attached to the request. Must have an even length. // -// Slice must not be mutated until the operation completes. -func (x *prmObjectRead) WithXHeaders(hs ...string) { - writeXHeadersToMeta(hs, &x.meta) -} - -// MarkRaw marks an intent to read physically stored object. -func (x *prmObjectRead) MarkRaw() { - x.raw = true -} - -// MarkLocal tells the server to execute the operation locally. -func (x *prmObjectRead) MarkLocal() { - x.meta.SetTTL(1) -} - -// WithBearerToken attaches bearer token to be used for the operation. +// // shared parameters of GET/HEAD/RANGE. +// type prmObjectRead struct { +// sessionContainer // -// If set, underlying eACL rules will be used in access control. +// raw bool +// } // -// Must be signed. -func (x *prmObjectRead) WithBearerToken(t bearer.Token) { - var v2token acl.BearerToken - t.WriteToV2(&v2token) - x.meta.SetBearerToken(&v2token) -} - -// PrmObjectGet groups optional parameters of ObjectGetInit operation. -type PrmObjectGet struct { - prmObjectRead -} - -// PayloadReader is a data stream of the particular NeoFS object. Implements -// [io.ReadCloser]. +// // WithXHeaders specifies list of extended headers (string key-value pairs) +// // to be attached to the request. Must have an even length. +// // +// // Slice must not be mutated until the operation completes. +// func (x *prmObjectRead) WithXHeaders(hs ...string) { +// writeXHeadersToMeta(hs, &x.meta) +// } +// +// // MarkRaw marks an intent to read physically stored object. +// func (x *prmObjectRead) MarkRaw() { +// x.raw = true +// } +// +// // MarkLocal tells the server to execute the operation locally. 
+// func (x *prmObjectRead) MarkLocal() { +// x.meta.SetTTL(1) +// } +// +// // WithBearerToken attaches bearer token to be used for the operation. +// // +// // If set, underlying eACL rules will be used in access control. +// // +// // Must be signed. +// func (x *prmObjectRead) WithBearerToken(t bearer.Token) { +// var v2token acl.BearerToken +// t.WriteToV2(&v2token) +// x.meta.SetBearerToken(&v2token) +// } +// +// // PrmObjectGet groups optional parameters of ObjectGetInit operation. +// type PrmObjectGet struct { +// prmObjectRead +// } +// +// // PayloadReader is a data stream of the particular NeoFS object. Implements +// // [io.ReadCloser]. +// // +// // Must be initialized using Client.ObjectGetInit, any other +// // usage is unsafe. +// type PayloadReader struct { +// cancelCtxStream context.CancelFunc +// +// client *Client +// stream interface { +// Read(resp *v2object.GetResponse) error +// } +// +// err error +// +// tailPayload []byte +// +// remainingPayloadLen int +// +// statisticCallback shortStatisticCallback +// } +// +// // readHeader reads header of the object. Result means success. +// // Failure reason can be received via Close. 
+// func (x *PayloadReader) readHeader(dst *object.Object) bool { +// var resp v2object.GetResponse +// x.err = x.stream.Read(&resp) +// if x.err != nil { +// return false +// } +// +// x.err = x.client.processResponse(&resp) +// if x.err != nil { +// return false +// } +// +// var partInit *v2object.GetObjectPartInit +// +// switch v := resp.GetBody().GetObjectPart().(type) { +// default: +// x.err = fmt.Errorf("unexpected message instead of heading part: %T", v) +// return false +// case *v2object.SplitInfo: +// x.err = object.NewSplitInfoError(object.NewSplitInfoFromV2(v)) +// return false +// case *v2object.GetObjectPartInit: +// partInit = v +// } +// +// var objv2 v2object.Object +// +// objv2.SetObjectID(partInit.GetObjectID()) +// objv2.SetHeader(partInit.GetHeader()) +// objv2.SetSignature(partInit.GetSignature()) +// +// x.remainingPayloadLen = int(objv2.GetHeader().GetPayloadLength()) +// +// *dst = *object.NewFromV2(&objv2) // need smth better +// +// return true +// } +// +// func (x *PayloadReader) readChunk(buf []byte) (int, bool) { +// var read int +// +// // read remaining tail +// read = copy(buf, x.tailPayload) +// +// x.tailPayload = x.tailPayload[read:] +// +// if len(buf) == read { +// return read, true +// } +// +// var chunk []byte +// var lastRead int +// +// for { +// var resp v2object.GetResponse +// x.err = x.stream.Read(&resp) +// if x.err != nil { +// return read, false +// } +// +// x.err = x.client.processResponse(&resp) +// if x.err != nil { +// return read, false +// } +// +// part := resp.GetBody().GetObjectPart() +// partChunk, ok := part.(*v2object.GetObjectPartChunk) +// if !ok { +// x.err = fmt.Errorf("unexpected message instead of chunk part: %T", part) +// return read, false +// } +// +// // read new chunk +// chunk = partChunk.GetChunk() +// if len(chunk) == 0 { +// // just skip empty chunks since they are not prohibited by protocol +// continue +// } +// +// lastRead = copy(buf[read:], chunk) +// +// read += lastRead +// 
+// if read == len(buf) { +// // save the tail +// x.tailPayload = append(x.tailPayload, chunk[lastRead:]...) +// +// return read, true +// } +// } +// } +// +// func (x *PayloadReader) close(ignoreEOF bool) error { +// defer x.cancelCtxStream() +// +// if errors.Is(x.err, io.EOF) { +// if ignoreEOF { +// return nil +// } +// if x.remainingPayloadLen > 0 { +// return io.ErrUnexpectedEOF +// } +// } +// return x.err +// } +// +// // Close ends reading the object payload. Must be called after using the +// // PayloadReader. +// func (x *PayloadReader) Close() error { +// var err error +// if x.statisticCallback != nil { +// defer func() { +// x.statisticCallback(err) +// }() +// } +// err = x.close(true) +// return err +// } +// +// // Read implements io.Reader of the object payload. +// func (x *PayloadReader) Read(p []byte) (int, error) { +// n, ok := x.readChunk(p) +// +// x.remainingPayloadLen -= n +// +// if !ok { +// if err := x.close(false); err != nil { +// return n, err +// } +// +// return n, x.err +// } +// +// if x.remainingPayloadLen < 0 { +// return n, errors.New("payload size overflow") +// } +// +// return n, nil +// } +// +// // ObjectGetInit initiates reading an object through a remote server using NeoFS API protocol. +// // Returns header of the requested object and stream of its payload separately. +// // +// // Exactly one return value is non-nil. Resulting PayloadReader must be finally closed. +// // +// // Context is required and must not be nil. It is used for network communication. +// // +// // Signer is required and must not be nil. The operation is executed on behalf of the account corresponding to +// // the specified Signer, which is taken into account, in particular, for access control. 
+// // +// // Return errors: +// // - global (see Client docs) +// // - [ErrMissingSigner] +// // - *[object.SplitInfoError] (returned on virtual objects with PrmObjectGet.MakeRaw) +// // - [apistatus.ErrContainerNotFound] +// // - [apistatus.ErrObjectNotFound] +// // - [apistatus.ErrObjectAccessDenied] +// // - [apistatus.ErrObjectAlreadyRemoved] +// // - [apistatus.ErrSessionTokenExpired] +// func (c *Client) ObjectGetInit(ctx context.Context, containerID cid.ID, objectID oid.ID, signer user.Signer, prm PrmObjectGet) (object.Object, *PayloadReader, error) { +// var ( +// addr v2refs.Address +// cidV2 v2refs.ContainerID +// oidV2 v2refs.ObjectID +// body v2object.GetRequestBody +// hdr object.Object +// err error +// ) +// +// defer func() { +// c.sendStatistic(stat.MethodObjectGet, err)() +// }() +// +// if signer == nil { +// return hdr, nil, ErrMissingSigner +// } +// +// containerID.WriteToV2(&cidV2) +// addr.SetContainerID(&cidV2) +// +// objectID.WriteToV2(&oidV2) +// addr.SetObjectID(&oidV2) +// +// body.SetRaw(prm.raw) +// body.SetAddress(&addr) +// +// // form request +// var req v2object.GetRequest +// +// req.SetBody(&body) +// c.prepareRequest(&req, &prm.meta) +// buf := c.buffers.Get().(*[]byte) +// err = signServiceMessage(signer, &req, *buf) +// c.buffers.Put(buf) +// if err != nil { +// err = fmt.Errorf("sign request: %w", err) +// return hdr, nil, err +// } +// +// ctx, cancel := context.WithCancel(ctx) +// +// stream, err := rpcAPIGetObject(&c.c, &req, client.WithContext(ctx)) +// if err != nil { +// cancel() +// err = fmt.Errorf("open stream: %w", err) +// return hdr, nil, err +// } +// +// var r PayloadReader +// r.cancelCtxStream = cancel +// r.stream = stream +// r.client = c +// r.statisticCallback = func(err error) { +// c.sendStatistic(stat.MethodObjectGetStream, err) +// } +// +// if !r.readHeader(&hdr) { +// err = fmt.Errorf("header: %w", r.Close()) +// return hdr, nil, err +// } +// +// return hdr, &r, nil +// } // -// Must be 
initialized using Client.ObjectGetInit, any other -// usage is unsafe. -type PayloadReader struct { - cancelCtxStream context.CancelFunc - - client *Client - stream interface { - Read(resp *v2object.GetResponse) error - } - - err error - - tailPayload []byte - - remainingPayloadLen int - - statisticCallback shortStatisticCallback -} - -// readHeader reads header of the object. Result means success. -// Failure reason can be received via Close. -func (x *PayloadReader) readHeader(dst *object.Object) bool { - var resp v2object.GetResponse - x.err = x.stream.Read(&resp) - if x.err != nil { - return false - } - - x.err = x.client.processResponse(&resp) - if x.err != nil { - return false - } - - var partInit *v2object.GetObjectPartInit - - switch v := resp.GetBody().GetObjectPart().(type) { - default: - x.err = fmt.Errorf("unexpected message instead of heading part: %T", v) - return false - case *v2object.SplitInfo: - x.err = object.NewSplitInfoError(object.NewSplitInfoFromV2(v)) - return false - case *v2object.GetObjectPartInit: - partInit = v - } - - var objv2 v2object.Object - - objv2.SetObjectID(partInit.GetObjectID()) - objv2.SetHeader(partInit.GetHeader()) - objv2.SetSignature(partInit.GetSignature()) - x.remainingPayloadLen = int(objv2.GetHeader().GetPayloadLength()) +// GetObjectHeaderOptions groups optional parameters of +// [Client.GetObjectHeader]. 
+type GetObjectHeaderOptions struct { + local bool + raw bool - *dst = *object.NewFromV2(&objv2) // need smth better + sessionSet bool + session session.Object - return true + bearerTokenSet bool + bearerToken bearer.Token } -func (x *PayloadReader) readChunk(buf []byte) (int, bool) { - var read int - - // read remaining tail - read = copy(buf, x.tailPayload) - - x.tailPayload = x.tailPayload[read:] - - if len(buf) == read { - return read, true - } - - var chunk []byte - var lastRead int - - for { - var resp v2object.GetResponse - x.err = x.stream.Read(&resp) - if x.err != nil { - return read, false - } - - x.err = x.client.processResponse(&resp) - if x.err != nil { - return read, false - } - - part := resp.GetBody().GetObjectPart() - partChunk, ok := part.(*v2object.GetObjectPartChunk) - if !ok { - x.err = fmt.Errorf("unexpected message instead of chunk part: %T", part) - return read, false - } - - // read new chunk - chunk = partChunk.GetChunk() - if len(chunk) == 0 { - // just skip empty chunks since they are not prohibited by protocol - continue - } - - lastRead = copy(buf[read:], chunk) - - read += lastRead - - if read == len(buf) { - // save the tail - x.tailPayload = append(x.tailPayload, chunk[lastRead:]...) - - return read, true - } - } +// PreventForwarding disables request forwarding to container nodes and +// instructs the server to read object header from the local storage. +func (x *GetObjectHeaderOptions) PreventForwarding() { + x.local = true } -func (x *PayloadReader) close(ignoreEOF bool) error { - defer x.cancelCtxStream() - - if errors.Is(x.err, io.EOF) { - if ignoreEOF { - return nil - } - if x.remainingPayloadLen > 0 { - return io.ErrUnexpectedEOF - } - } - return x.err +// PreventAssembly disables assembly of object's header if the object is stored +// as split-chain of smaller objects. 
If PreventAssembly is given and requested +// object is actually split, [Client.GetObjectHeader] will return +// [object.SplitInfoError] carrying [object.SplitInfo] at least with last or +// linker split-chain part. For atomic objects option is no-op. +// +// PreventAssembly allows to optimize object assembly utilities and is unlikely +// needed when working with objects regularly. +func (x *GetObjectHeaderOptions) PreventAssembly() { + x.raw = true } -// Close ends reading the object payload. Must be called after using the -// PayloadReader. -func (x *PayloadReader) Close() error { - var err error - if x.statisticCallback != nil { - defer func() { - x.statisticCallback(err) - }() - } - err = x.close(true) - return err +// WithinSession specifies token of the session preliminary issued by some user +// with the client signer. Session must include [session.VerbObjectHead] action. +// The token must be signed and target the subject authenticated by signer +// passed to [Client.GetObjectHeader]. If set, the session issuer will +// be treated as the original request sender. +// +// Note that sessions affect access control only indirectly: they just replace +// request originator. +// +// With session, [Client.GetObjectHeader] can also return +// [apistatus.ErrSessionTokenExpired] if the token has expired: this usually +// requires re-issuing the session. +// +// Note that it makes no sense to start session with the server via +// [Client.StartSession] like for [Client.DeleteObject] or [Client.PutObject]. +func (x *GetObjectHeaderOptions) WithinSession(s session.Object) { + x.session, x.sessionSet = s, true } -// Read implements io.Reader of the object payload. 
-func (x *PayloadReader) Read(p []byte) (int, error) { - n, ok := x.readChunk(p) - - x.remainingPayloadLen -= n - - if !ok { - if err := x.close(false); err != nil { - return n, err - } - - return n, x.err - } - - if x.remainingPayloadLen < 0 { - return n, errors.New("payload size overflow") - } - - return n, nil +// WithBearerToken attaches bearer token carrying extended ACL rules that +// replace eACL of the object's container. The token must be issued by the +// container owner and target the subject authenticated by signer passed to +// [Client.GetObjectHeader]. In practice, bearer token makes sense only if it +// grants "heading" rights to the subject. +func (x *GetObjectHeaderOptions) WithBearerToken(t bearer.Token) { + x.bearerToken, x.bearerTokenSet = t, true } -// ObjectGetInit initiates reading an object through a remote server using NeoFS API protocol. -// Returns header of the requested object and stream of its payload separately. -// -// Exactly one return value is non-nil. Resulting PayloadReader must be finally closed. -// -// Context is required and must not be nil. It is used for network communication. -// -// Signer is required and must not be nil. The operation is executed on behalf of the account corresponding to -// the specified Signer, which is taken into account, in particular, for access control. 
-// -// Return errors: -// - global (see Client docs) -// - [ErrMissingSigner] -// - *[object.SplitInfoError] (returned on virtual objects with PrmObjectGet.MakeRaw) -// - [apistatus.ErrContainerNotFound] -// - [apistatus.ErrObjectNotFound] -// - [apistatus.ErrObjectAccessDenied] -// - [apistatus.ErrObjectAlreadyRemoved] -// - [apistatus.ErrSessionTokenExpired] -func (c *Client) ObjectGetInit(ctx context.Context, containerID cid.ID, objectID oid.ID, signer user.Signer, prm PrmObjectGet) (object.Object, *PayloadReader, error) { - var ( - addr v2refs.Address - cidV2 v2refs.ContainerID - oidV2 v2refs.ObjectID - body v2object.GetRequestBody - hdr object.Object - err error - ) - - defer func() { - c.sendStatistic(stat.MethodObjectGet, err)() - }() - +// GetObjectHeader requests header of the referenced object. When object's +// payload is not needed, GetObjectHeader should be used instead of +// [Client.GetObject] as much more efficient. +// +// GetObjectHeader returns: +// - [apistatus.ErrContainerNotFound] if referenced container is missing +// - [apistatus.ErrObjectNotFound] if the object is missing +// - [apistatus.ErrObjectAccessDenied] if signer has no access to hash the payload +// - [apistatus.ErrObjectAlreadyRemoved] if the object has already been removed +func (c *Client) GetObjectHeader(ctx context.Context, cnr cid.ID, obj oid.ID, signer neofscrypto.Signer, opts GetObjectHeaderOptions) (object.Header, error) { + var res object.Header if signer == nil { - return hdr, nil, ErrMissingSigner + return res, errMissingSigner } - containerID.WriteToV2(&cidV2) - addr.SetContainerID(&cidV2) - - objectID.WriteToV2(&oidV2) - addr.SetObjectID(&oidV2) - - body.SetRaw(prm.raw) - body.SetAddress(&addr) - - // form request - var req v2object.GetRequest - - req.SetBody(&body) - c.prepareRequest(&req, &prm.meta) - buf := c.buffers.Get().(*[]byte) - err = signServiceMessage(signer, &req, *buf) - c.buffers.Put(buf) - if err != nil { - err = fmt.Errorf("sign request: %w", err) - 
return hdr, nil, err + var err error + if c.handleAPIOpResult != nil { + defer func(start time.Time) { + c.handleAPIOpResult(c.serverPubKey, c.endpoint, stat.MethodObjectHead, time.Since(start), err) + }(time.Now()) } - ctx, cancel := context.WithCancel(ctx) - - stream, err := rpcAPIGetObject(&c.c, &req, client.WithContext(ctx)) + // form request + req := &apiobject.HeadRequest{ + Body: &apiobject.HeadRequest_Body{ + Address: &refs.Address{ + ContainerId: new(refs.ContainerID), + ObjectId: new(refs.ObjectID), + }, + Raw: opts.raw, + }, + MetaHeader: new(apisession.RequestMetaHeader), + } + cnr.WriteToV2(req.Body.Address.ContainerId) + obj.WriteToV2(req.Body.Address.ObjectId) + if opts.sessionSet { + req.MetaHeader.SessionToken = new(apisession.SessionToken) + opts.session.WriteToV2(req.MetaHeader.SessionToken) + } + if opts.bearerTokenSet { + req.MetaHeader.BearerToken = new(apiacl.BearerToken) + opts.bearerToken.WriteToV2(req.MetaHeader.BearerToken) + } + if opts.local { + req.MetaHeader.Ttl = 1 + } else { + req.MetaHeader.Ttl = 2 + } + // FIXME: balance requests need small fixed-size buffers for encoding, its makes + // no sense to mosh them with other buffers + buf := c.signBuffers.Get().(*[]byte) + defer c.signBuffers.Put(buf) + if req.VerifyHeader, err = neofscrypto.SignRequest(signer, req, req.Body, *buf); err != nil { + err = fmt.Errorf("%s: %w", errSignRequest, err) // for closure above + return res, err + } + + // send request + resp, err := c.transport.object.Head(ctx, req) if err != nil { - cancel() - err = fmt.Errorf("open stream: %w", err) - return hdr, nil, err - } - - var r PayloadReader - r.cancelCtxStream = cancel - r.stream = stream - r.client = c - r.statisticCallback = func(err error) { - c.sendStatistic(stat.MethodObjectGetStream, err) - } - - if !r.readHeader(&hdr) { - err = fmt.Errorf("header: %w", r.Close()) - return hdr, nil, err + err = fmt.Errorf("%s: %w", errTransport, err) // for closure above + return res, err + } + + // intercept 
response info + if c.interceptAPIRespInfo != nil { + if err = c.interceptAPIRespInfo(ResponseMetaInfo{ + key: resp.GetVerifyHeader().GetBodySignature().GetKey(), + epoch: resp.GetMetaHeader().GetEpoch(), + }); err != nil { + err = fmt.Errorf("%s: %w", errInterceptResponseInfo, err) // for closure above + return res, err + } } - return hdr, &r, nil -} - -// PrmObjectHead groups optional parameters of ObjectHead operation. -type PrmObjectHead struct { - prmObjectRead -} - -// ObjectHead reads object header through a remote server using NeoFS API protocol. -// -// Exactly one return value is non-nil. By default, server status is returned in res structure. -// Any client's internal or transport errors are returned as `error`, -// see [apistatus] package for NeoFS-specific error types. -// -// Context is required and must not be nil. It is used for network communication. -// -// Signer is required and must not be nil. The operation is executed on behalf of the account corresponding to -// the specified Signer, which is taken into account, in particular, for access control. 
-// -// Return errors: -// - global (see Client docs) -// - [ErrMissingSigner] -// - *[object.SplitInfoError] (returned on virtual objects with PrmObjectHead.MakeRaw) -// - [apistatus.ErrContainerNotFound] -// - [apistatus.ErrObjectNotFound] -// - [apistatus.ErrObjectAccessDenied] -// - [apistatus.ErrObjectAlreadyRemoved] -// - [apistatus.ErrSessionTokenExpired] -func (c *Client) ObjectHead(ctx context.Context, containerID cid.ID, objectID oid.ID, signer user.Signer, prm PrmObjectHead) (*object.Object, error) { - var ( - addr v2refs.Address - cidV2 v2refs.ContainerID - oidV2 v2refs.ObjectID - body v2object.HeadRequestBody - err error - ) - - defer func() { - c.sendStatistic(stat.MethodObjectHead, err)() - }() - - if signer == nil { - return nil, ErrMissingSigner + // verify response integrity + if err = neofscrypto.VerifyResponse(resp, resp.Body); err != nil { + err = fmt.Errorf("%s: %w", errResponseSignature, err) // for closure above + return res, err } - - containerID.WriteToV2(&cidV2) - addr.SetContainerID(&cidV2) - - objectID.WriteToV2(&oidV2) - addr.SetObjectID(&oidV2) - - body.SetRaw(prm.raw) - body.SetAddress(&addr) - - var req v2object.HeadRequest - req.SetBody(&body) - c.prepareRequest(&req, &prm.meta) - - buf := c.buffers.Get().(*[]byte) - err = signServiceMessage(signer, &req, *buf) - c.buffers.Put(buf) + sts, err := apistatus.ErrorFromV2(resp.GetMetaHeader().GetStatus()) if err != nil { - err = fmt.Errorf("sign request: %w", err) - return nil, err + err = fmt.Errorf("%s: %w", errInvalidResponseStatus, err) // for closure above + return res, err } - - resp, err := rpcAPIHeadObject(&c.c, &req, client.WithContext(ctx)) - if err != nil { - err = fmt.Errorf("write request: %w", err) - return nil, err + if sts != nil { + err = sts // for closure above + return res, err } - if err = c.processResponse(resp); err != nil { - return nil, err + // decode response payload + if resp.Body == nil { + err = errors.New(errMissingResponseBody) // for closure above + 
return res, err } - - switch v := resp.GetBody().GetHeaderPart().(type) { + switch f := resp.Body.Head.(type) { default: - err = fmt.Errorf("unexpected header type %T", v) - return nil, err - case *v2object.SplitInfo: - err = object.NewSplitInfoError(object.NewSplitInfoFromV2(v)) - return nil, err - case *v2object.HeaderWithSignature: - if v == nil { - return nil, errors.New("empty header") - } - - var objv2 v2object.Object - objv2.SetHeader(v.GetHeader()) - objv2.SetSignature(v.GetSignature()) - - obj := object.NewFromV2(&objv2) - obj.SetID(objectID) - - return obj, nil - } -} - -// PrmObjectRange groups optional parameters of ObjectRange operation. -type PrmObjectRange struct { - prmObjectRead -} - -// ObjectRangeReader is designed to read payload range of one object -// from NeoFS system. Implements [io.ReadCloser]. -// -// Must be initialized using Client.ObjectRangeInit, any other -// usage is unsafe. -type ObjectRangeReader struct { - cancelCtxStream context.CancelFunc - - client *Client - - err error - - stream interface { - Read(resp *v2object.GetRangeResponse) error - } - - tailPayload []byte - - remainingPayloadLen int - - statisticCallback shortStatisticCallback -} - -func (x *ObjectRangeReader) readChunk(buf []byte) (int, bool) { - var read int - - // read remaining tail - read = copy(buf, x.tailPayload) - - x.tailPayload = x.tailPayload[read:] - - if len(buf) == read { - return read, true - } - - var partChunk *v2object.GetRangePartChunk - var chunk []byte - var lastRead int - - for { - var resp v2object.GetRangeResponse - x.err = x.stream.Read(&resp) - if x.err != nil { - return read, false - } - - x.err = x.client.processResponse(&resp) - if x.err != nil { - return read, false + err = fmt.Errorf("%s: unknown/invalid oneof field (%T)", errInvalidResponseBodyField, f) // for closure above + return res, err + case *apiobject.HeadResponse_Body_Header: + const fieldHeader = "header" + if f == nil || f.Header == nil || f.Header.Header == nil { + err = 
fmt.Errorf("%s (%s)", errMissingResponseBodyField, fieldHeader) // for closure above + return res, err + } else if err = res.ReadFromV2(f.Header.Header); err != nil { + err = fmt.Errorf("%s (%s): %w", errInvalidResponseBodyField, fieldHeader, err) // for closure above + return res, err } - - // get chunk message - switch v := resp.GetBody().GetRangePart().(type) { - default: - x.err = fmt.Errorf("unexpected message received: %T", v) - return read, false - case *v2object.SplitInfo: - x.err = object.NewSplitInfoError(object.NewSplitInfoFromV2(v)) - return read, false - case *v2object.GetRangePartChunk: - partChunk = v + return res, nil + case *apiobject.HeadResponse_Body_SplitInfo: + if !opts.raw { + err = fmt.Errorf("%s: server responded with split info which was not requested", errInvalidResponseBody) // for closure above + return res, err } - - chunk = partChunk.GetChunk() - if len(chunk) == 0 { - // just skip empty chunks since they are not prohibited by protocol - continue + const fieldSplitInfo = "split info" + if f == nil || f.SplitInfo == nil { + err = fmt.Errorf("%s (%s)", errMissingResponseBodyField, fieldSplitInfo) // for closure above + return res, err } - - lastRead = copy(buf[read:], chunk) - - read += lastRead - - if read == len(buf) { - // save the tail - x.tailPayload = append(x.tailPayload, chunk[lastRead:]...) 
- - return read, true + var splitInfo object.SplitInfo + if err = splitInfo.ReadFromV2(f.SplitInfo); err != nil { + err = fmt.Errorf("%s (%s): %w", errInvalidResponseBodyField, fieldSplitInfo, err) // for closure above + return res, err } + err = object.SplitInfoError(splitInfo) // for closure above + return res, err } } -func (x *ObjectRangeReader) close(ignoreEOF bool) error { - defer x.cancelCtxStream() - - if errors.Is(x.err, io.EOF) { - if ignoreEOF { - return nil - } - if x.remainingPayloadLen > 0 { - return io.ErrUnexpectedEOF - } - } - return x.err -} - -// Close ends reading the payload range and returns the result of the operation -// along with the final results. Must be called after using the ObjectRangeReader. -// -// Any client's internal or transport errors are returned as Go built-in error. -// If Client is tuned to resolve NeoFS API statuses, then NeoFS failures -// codes are returned as error. -// -// Return errors: -// - global (see Client docs) -// - *[object.SplitInfoError] (returned on virtual objects with PrmObjectRange.MakeRaw) -// - [apistatus.ErrContainerNotFound] -// - [apistatus.ErrObjectNotFound] -// - [apistatus.ErrObjectAccessDenied] -// - [apistatus.ErrObjectAlreadyRemoved] -// - [apistatus.ErrObjectOutOfRange] -// - [apistatus.ErrSessionTokenExpired] -func (x *ObjectRangeReader) Close() error { - var err error - if x.statisticCallback != nil { - defer func() { - x.statisticCallback(err) - }() - } - err = x.close(true) - return err -} - -// Read implements io.Reader of the object payload. -func (x *ObjectRangeReader) Read(p []byte) (int, error) { - n, ok := x.readChunk(p) - - x.remainingPayloadLen -= n - - if !ok { - err := x.close(false) - if err != nil { - return n, err - } - - return n, x.err - } - - if x.remainingPayloadLen < 0 { - return n, errors.New("payload range size overflow") - } - - return n, nil -} - -// ObjectRangeInit initiates reading an object's payload range through a remote -// server using NeoFS API protocol. 
-// -// The call only opens the transmission channel, explicit fetching is done using the ObjectRangeReader. -// Exactly one return value is non-nil. Resulting reader must be finally closed. -// -// Context is required and must not be nil. It is used for network communication. -// -// Signer is required and must not be nil. The operation is executed on behalf of the account corresponding to -// the specified Signer, which is taken into account, in particular, for access control. -// -// Return errors: -// - [ErrZeroRangeLength] -// - [ErrMissingSigner] -func (c *Client) ObjectRangeInit(ctx context.Context, containerID cid.ID, objectID oid.ID, offset, length uint64, signer user.Signer, prm PrmObjectRange) (*ObjectRangeReader, error) { - var ( - addr v2refs.Address - cidV2 v2refs.ContainerID - oidV2 v2refs.ObjectID - rngV2 v2object.Range - body v2object.GetRangeRequestBody - err error - ) - - defer func() { - c.sendStatistic(stat.MethodObjectRange, err)() - }() - - if length == 0 { - err = ErrZeroRangeLength - return nil, err - } - - if signer == nil { - return nil, ErrMissingSigner - } - - containerID.WriteToV2(&cidV2) - addr.SetContainerID(&cidV2) - - objectID.WriteToV2(&oidV2) - addr.SetObjectID(&oidV2) - - rngV2.SetOffset(offset) - rngV2.SetLength(length) - - // form request body - body.SetRaw(prm.raw) - body.SetAddress(&addr) - body.SetRange(&rngV2) - - // form request - var req v2object.GetRangeRequest - - req.SetBody(&body) - c.prepareRequest(&req, &prm.meta) - - buf := c.buffers.Get().(*[]byte) - err = signServiceMessage(signer, &req, *buf) - c.buffers.Put(buf) - if err != nil { - err = fmt.Errorf("sign request: %w", err) - return nil, err - } - - ctx, cancel := context.WithCancel(ctx) - - stream, err := rpcAPIGetObjectRange(&c.c, &req, client.WithContext(ctx)) - if err != nil { - cancel() - err = fmt.Errorf("open stream: %w", err) - return nil, err - } - - var r ObjectRangeReader - r.remainingPayloadLen = int(length) - r.cancelCtxStream = cancel - r.stream = 
stream - r.client = c - r.statisticCallback = func(err error) { - c.sendStatistic(stat.MethodObjectRangeStream, err)() - } - - return &r, nil -} +// +// // PrmObjectRange groups optional parameters of ObjectRange operation. +// type PrmObjectRange struct { +// prmObjectRead +// } +// +// // ObjectRangeReader is designed to read payload range of one object +// // from NeoFS system. Implements [io.ReadCloser]. +// // +// // Must be initialized using Client.ObjectRangeInit, any other +// // usage is unsafe. +// type ObjectRangeReader struct { +// cancelCtxStream context.CancelFunc +// +// client *Client +// +// err error +// +// stream interface { +// Read(resp *v2object.GetRangeResponse) error +// } +// +// tailPayload []byte +// +// remainingPayloadLen int +// +// statisticCallback shortStatisticCallback +// } +// +// func (x *ObjectRangeReader) readChunk(buf []byte) (int, bool) { +// var read int +// +// // read remaining tail +// read = copy(buf, x.tailPayload) +// +// x.tailPayload = x.tailPayload[read:] +// +// if len(buf) == read { +// return read, true +// } +// +// var partChunk *v2object.GetRangePartChunk +// var chunk []byte +// var lastRead int +// +// for { +// var resp v2object.GetRangeResponse +// x.err = x.stream.Read(&resp) +// if x.err != nil { +// return read, false +// } +// +// x.err = x.client.processResponse(&resp) +// if x.err != nil { +// return read, false +// } +// +// // get chunk message +// switch v := resp.GetBody().GetRangePart().(type) { +// default: +// x.err = fmt.Errorf("unexpected message received: %T", v) +// return read, false +// case *v2object.SplitInfo: +// x.err = object.NewSplitInfoError(object.NewSplitInfoFromV2(v)) +// return read, false +// case *v2object.GetRangePartChunk: +// partChunk = v +// } +// +// chunk = partChunk.GetChunk() +// if len(chunk) == 0 { +// // just skip empty chunks since they are not prohibited by protocol +// continue +// } +// +// lastRead = copy(buf[read:], chunk) +// +// read += lastRead +// +// 
if read == len(buf) { +// // save the tail +// x.tailPayload = append(x.tailPayload, chunk[lastRead:]...) +// +// return read, true +// } +// } +// } +// +// func (x *ObjectRangeReader) close(ignoreEOF bool) error { +// defer x.cancelCtxStream() +// +// if errors.Is(x.err, io.EOF) { +// if ignoreEOF { +// return nil +// } +// if x.remainingPayloadLen > 0 { +// return io.ErrUnexpectedEOF +// } +// } +// return x.err +// } +// +// // Close ends reading the payload range and returns the result of the operation +// // along with the final results. Must be called after using the ObjectRangeReader. +// // +// // Any client's internal or transport errors are returned as Go built-in error. +// // If Client is tuned to resolve NeoFS API statuses, then NeoFS failures +// // codes are returned as error. +// // +// // Return errors: +// // - global (see Client docs) +// // - *[object.SplitInfoError] (returned on virtual objects with PrmObjectRange.MakeRaw) +// // - [apistatus.ErrContainerNotFound] +// // - [apistatus.ErrObjectNotFound] +// // - [apistatus.ErrObjectAccessDenied] +// // - [apistatus.ErrObjectAlreadyRemoved] +// // - [apistatus.ErrObjectOutOfRange] +// // - [apistatus.ErrSessionTokenExpired] +// func (x *ObjectRangeReader) Close() error { +// var err error +// if x.statisticCallback != nil { +// defer func() { +// x.statisticCallback(err) +// }() +// } +// err = x.close(true) +// return err +// } +// +// // Read implements io.Reader of the object payload. +// func (x *ObjectRangeReader) Read(p []byte) (int, error) { +// n, ok := x.readChunk(p) +// +// x.remainingPayloadLen -= n +// +// if !ok { +// err := x.close(false) +// if err != nil { +// return n, err +// } +// +// return n, x.err +// } +// +// if x.remainingPayloadLen < 0 { +// return n, errors.New("payload range size overflow") +// } +// +// return n, nil +// } +// +// // ObjectRangeInit initiates reading an object's payload range through a remote +// // server using NeoFS API protocol. 
+// // +// // The call only opens the transmission channel, explicit fetching is done using the ObjectRangeReader. +// // Exactly one return value is non-nil. Resulting reader must be finally closed. +// // +// // Context is required and must not be nil. It is used for network communication. +// // +// // Signer is required and must not be nil. The operation is executed on behalf of the account corresponding to +// // the specified Signer, which is taken into account, in particular, for access control. +// // +// // Return errors: +// // - [ErrZeroRangeLength] +// // - [ErrMissingSigner] +// func (c *Client) ObjectRangeInit(ctx context.Context, containerID cid.ID, objectID oid.ID, offset, length uint64, signer user.Signer, prm PrmObjectRange) (*ObjectRangeReader, error) { +// var ( +// addr v2refs.Address +// cidV2 v2refs.ContainerID +// oidV2 v2refs.ObjectID +// rngV2 v2object.Range +// body v2object.GetRangeRequestBody +// err error +// ) +// +// defer func() { +// c.sendStatistic(stat.MethodObjectRange, err)() +// }() +// +// if length == 0 { +// err = ErrZeroRangeLength +// return nil, err +// } +// +// if signer == nil { +// return nil, ErrMissingSigner +// } +// +// containerID.WriteToV2(&cidV2) +// addr.SetContainerID(&cidV2) +// +// objectID.WriteToV2(&oidV2) +// addr.SetObjectID(&oidV2) +// +// rngV2.SetOffset(offset) +// rngV2.SetLength(length) +// +// // form request body +// body.SetRaw(prm.raw) +// body.SetAddress(&addr) +// body.SetRange(&rngV2) +// +// // form request +// var req v2object.GetRangeRequest +// +// req.SetBody(&body) +// c.prepareRequest(&req, &prm.meta) +// +// buf := c.buffers.Get().(*[]byte) +// err = signServiceMessage(signer, &req, *buf) +// c.buffers.Put(buf) +// if err != nil { +// err = fmt.Errorf("sign request: %w", err) +// return nil, err +// } +// +// ctx, cancel := context.WithCancel(ctx) +// +// stream, err := rpcAPIGetObjectRange(&c.c, &req, client.WithContext(ctx)) +// if err != nil { +// cancel() +// err = 
fmt.Errorf("open stream: %w", err) +// return nil, err +// } +// +// var r ObjectRangeReader +// r.remainingPayloadLen = int(length) +// r.cancelCtxStream = cancel +// r.stream = stream +// r.client = c +// r.statisticCallback = func(err error) { +// c.sendStatistic(stat.MethodObjectRangeStream, err)() +// } +// +// return &r, nil +// } diff --git a/client/object_get_test.go b/client/object_get_test.go index 576063238..5d2b2606b 100644 --- a/client/object_get_test.go +++ b/client/object_get_test.go @@ -1,55 +1,585 @@ package client import ( + "bytes" "context" + "errors" + "fmt" + "net" "testing" + "time" - v2refs "github.com/nspcc-dev/neofs-api-go/v2/refs" + apinetmap "github.com/nspcc-dev/neofs-sdk-go/api/netmap" + apiobject "github.com/nspcc-dev/neofs-sdk-go/api/object" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + apisession "github.com/nspcc-dev/neofs-sdk-go/api/session" + "github.com/nspcc-dev/neofs-sdk-go/api/status" + "github.com/nspcc-dev/neofs-sdk-go/bearer" + bearertest "github.com/nspcc-dev/neofs-sdk-go/bearer/test" + apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" cid "github.com/nspcc-dev/neofs-sdk-go/container/id" + cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" + neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" + neofscryptotest "github.com/nspcc-dev/neofs-sdk-go/crypto/test" + netmaptest "github.com/nspcc-dev/neofs-sdk-go/netmap/test" + "github.com/nspcc-dev/neofs-sdk-go/object" oid "github.com/nspcc-dev/neofs-sdk-go/object/id" + oidtest "github.com/nspcc-dev/neofs-sdk-go/object/id/test" + objecttest "github.com/nspcc-dev/neofs-sdk-go/object/test" + "github.com/nspcc-dev/neofs-sdk-go/session" + sessiontest "github.com/nspcc-dev/neofs-sdk-go/session/test" + "github.com/nspcc-dev/neofs-sdk-go/stat" + usertest "github.com/nspcc-dev/neofs-sdk-go/user/test" + versiontest "github.com/nspcc-dev/neofs-sdk-go/version/test" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + 
"google.golang.org/grpc" + "google.golang.org/grpc/test/bufconn" ) -func TestClient_Get(t *testing.T) { - t.Run("missing signer", func(t *testing.T) { - c := newClient(t, nil) - ctx := context.Background() +type getObjectHeaderServer struct { + noOtherObjectCalls + // client + cnr cid.ID + obj oid.ID + clientSigner neofscrypto.Signer + local bool + raw bool + session *session.Object + bearerToken *bearer.Token + // server + sleepDur time.Duration + endpointInfoOnDialServer + retSplitInfo bool + header object.Header + splitInfo object.SplitInfo + errTransport error + modifyResp func(*apiobject.HeadResponse) + corruptRespSig func(*apiobject.HeadResponse) +} - var nonilAddr v2refs.Address - nonilAddr.SetObjectID(new(v2refs.ObjectID)) - nonilAddr.SetContainerID(new(v2refs.ContainerID)) +func (x getObjectHeaderServer) Head(ctx context.Context, req *apiobject.HeadRequest) (*apiobject.HeadResponse, error) { + if x.sleepDur > 0 { + time.Sleep(x.sleepDur) + } + if x.errTransport != nil { + return nil, x.errTransport + } + var sts status.Status + resp := apiobject.HeadResponse{ + MetaHeader: &apisession.ResponseMetaHeader{Status: &sts, Epoch: x.epoch}, + } + var err error + var cnr cid.ID + var obj oid.ID + sigScheme := refs.SignatureScheme(x.clientSigner.Scheme()) + creatorPubKey := neofscrypto.PublicKeyBytes(x.clientSigner.Public()) + if ctx == nil { + sts.Code, sts.Message = status.InternalServerError, "nil context" + } else if req == nil { + sts.Code, sts.Message = status.InternalServerError, "nil request" + } else if err = neofscrypto.VerifyRequest(req, req.Body); err != nil { + sts.Code, sts.Message = status.SignatureVerificationFail, err.Error() + } else if req.VerifyHeader.BodySignature.Scheme != sigScheme || + !bytes.Equal(req.VerifyHeader.BodySignature.Key, creatorPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request body signature credentials" + } else if req.VerifyHeader.MetaSignature.Scheme != sigScheme || + 
!bytes.Equal(req.VerifyHeader.MetaSignature.Key, creatorPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request meta header signature credentials" + } else if req.VerifyHeader.OriginSignature.Scheme != sigScheme || + !bytes.Equal(req.VerifyHeader.OriginSignature.Key, creatorPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected origin request verification header signature credentials" + } else if req.MetaHeader == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: missing meta header" + } else if x.local && req.MetaHeader.Ttl != 1 || !x.local && req.MetaHeader.Ttl != 2 { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request: invalid meta header: invalid TTL %d", req.MetaHeader.Ttl) + } else if req.Body == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: missing body" + } else if req.Body.Address == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: invalid body: missing address" + } else if req.Body.Address.ObjectId == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: invalid body: invalid address: missing ID" + } else if err = obj.ReadFromV2(req.Body.Address.ObjectId); err != nil { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request: invalid body: invalid address: invalid ID: %s", err) + } else if obj != x.obj { + sts.Code, sts.Message = status.InternalServerError, "[test] wrong ID" + } else if req.Body.Address.ContainerId == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: invalid body: invalid address: missing container" + } else if err = cnr.ReadFromV2(req.Body.Address.ContainerId); err != nil { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request: invalid body: invalid address: invalid container: %s", err) + } else if cnr != x.cnr { + sts.Code, sts.Message = 
status.InternalServerError, "[test] wrong container" + } else if req.Body.Raw != x.raw { + sts.Code, sts.Message = status.InternalServerError, "[test] wrong raw flag" + } else if req.Body.MainOnly { + sts.Code, sts.Message = status.InternalServerError, "invalid request: invalid body: main_only flag is set" + } + if sts.Code == 0 && x.session != nil { + var so session.Object + if req.MetaHeader.SessionToken == nil { + sts.Code, sts.Message = status.InternalServerError, "[test] missing session token" + } else if err = so.ReadFromV2(req.MetaHeader.SessionToken); err != nil { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request: invalid meta header: invalid session token: %v", err) + } else if !bytes.Equal(so.Marshal(), x.session.Marshal()) { + sts.Code, sts.Message = status.InternalServerError, "[test] session token in request differs with the input one" + } + } + if sts.Code == 0 && x.bearerToken != nil { + var bt bearer.Token + if req.MetaHeader.BearerToken == nil { + sts.Code, sts.Message = status.InternalServerError, "[test] missing bearer token" + } else if err = bt.ReadFromV2(req.MetaHeader.BearerToken); err != nil { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request: invalid meta header: invalid bearer token: %v", err) + } else if !bytes.Equal(bt.Marshal(), x.bearerToken.Marshal()) { + sts.Code, sts.Message = status.InternalServerError, "[test] bearer token in request differs with the input one" + } + } + if sts.Code == 0 { + resp.MetaHeader.Status = nil + resp.Body = new(apiobject.HeadResponse_Body) + if x.retSplitInfo { + var si apiobject.SplitInfo + x.splitInfo.WriteToV2(&si) + resp.Body.Head = &apiobject.HeadResponse_Body_SplitInfo{SplitInfo: &si} + } else { + var h apiobject.Header + x.header.WriteToV2(&h) + resp.Body.Head = &apiobject.HeadResponse_Body_Header{Header: &apiobject.HeaderWithSignature{Header: &h}} + } + } + if x.modifyResp != nil { + x.modifyResp(&resp) + } + resp.VerifyHeader, err 
= neofscrypto.SignResponse(x.serverSigner, &resp, resp.Body, nil) + if err != nil { + return nil, fmt.Errorf("sign response: %w", err) + } + if x.corruptRespSig != nil { + x.corruptRespSig(&resp) + } + return &resp, nil +} - tt := []struct { - name string - methodCall func() error - }{ - { - "get", - func() error { - _, _, err := c.ObjectGetInit(ctx, cid.ID{}, oid.ID{}, nil, PrmObjectGet{prmObjectRead: prmObjectRead{}}) - return err - }, - }, - { - "get_range", - func() error { - _, err := c.ObjectRangeInit(ctx, cid.ID{}, oid.ID{}, 0, 1, nil, PrmObjectRange{prmObjectRead: prmObjectRead{}}) - return err - }, - }, - { - "get_head", - func() error { - _, err := c.ObjectHead(ctx, cid.ID{}, oid.ID{}, nil, PrmObjectHead{prmObjectRead: prmObjectRead{}}) - return err - }, - }, +func TestClient_GetObjectHeader(t *testing.T) { + ctx := context.Background() + var srv getObjectHeaderServer + srv.sleepDur = 10 * time.Millisecond + srv.serverSigner = neofscryptotest.RandomSigner() + srv.latestVersion = versiontest.Version() + srv.nodeInfo = netmaptest.NodeInfo() + srv.nodeInfo.SetPublicKey(neofscrypto.PublicKeyBytes(srv.serverSigner.Public())) + usr, _ := usertest.TwoUsers() + srv.clientSigner = usr + srv.cnr = cidtest.ID() + srv.obj = oidtest.ID() + srv.header = objecttest.Header() + srv.splitInfo = objecttest.SplitInfo() + _dial := func(t testing.TB, srv *getObjectHeaderServer, assertErr func(error), customizeOpts func(*Options)) (*Client, *bool) { + var opts Options + var handlerCalled bool + opts.SetAPIRequestResultHandler(func(nodeKey []byte, endpoint string, op stat.Method, dur time.Duration, err error) { + handlerCalled = true + require.Equal(t, srv.nodeInfo.PublicKey(), nodeKey) + require.Equal(t, "localhost:8080", endpoint) + require.Equal(t, stat.MethodObjectHead, op) + require.Greater(t, dur, srv.sleepDur) + assertErr(err) + }) + if customizeOpts != nil { + customizeOpts(&opts) } - for _, test := range tt { - t.Run(test.name, func(t *testing.T) { - require.ErrorIs(t, 
test.methodCall(), ErrMissingSigner) + c, err := New(anyValidURI, opts) + require.NoError(t, err) + + conn := bufconn.Listen(10 << 10) + gs := grpc.NewServer() + apinetmap.RegisterNetmapServiceServer(gs, srv) + apiobject.RegisterObjectServiceServer(gs, srv) + go func() { _ = gs.Serve(conn) }() + t.Cleanup(gs.Stop) + + c.dial = func(ctx context.Context, _ string) (net.Conn, error) { return conn.DialContext(ctx) } + require.NoError(t, c.Dial(ctx)) + + return c, &handlerCalled + } + dial := func(t testing.TB, srv *getObjectHeaderServer, assertErr func(error)) (*Client, *bool) { + return _dial(t, srv, assertErr, nil) + } + t.Run("invalid signer", func(t *testing.T) { + c, err := New(anyValidURI, Options{}) + require.NoError(t, err) + _, err = c.GetObjectHeader(ctx, srv.cnr, srv.obj, nil, GetObjectHeaderOptions{}) + require.ErrorIs(t, err, errMissingSigner) + }) + t.Run("OK", func(t *testing.T) { + for _, testCase := range []struct { + name string + setOpts func(srv *getObjectHeaderServer, opts *GetObjectHeaderOptions) + }{ + {name: "default", setOpts: func(srv *getObjectHeaderServer, opts *GetObjectHeaderOptions) {}}, + {name: "with session", setOpts: func(srv *getObjectHeaderServer, opts *GetObjectHeaderOptions) { + so := sessiontest.Object() + opts.WithinSession(so) + srv.session = &so + }}, + {name: "with bearer token", setOpts: func(srv *getObjectHeaderServer, opts *GetObjectHeaderOptions) { + bt := bearertest.Token() + opts.WithBearerToken(bt) + srv.bearerToken = &bt + }}, + {name: "no forwarding", setOpts: func(srv *getObjectHeaderServer, opts *GetObjectHeaderOptions) { + srv.local = true + opts.PreventForwarding() + }}, + } { + t.Run(testCase.name, func(t *testing.T) { + srv := srv + var opts GetObjectHeaderOptions + testCase.setOpts(&srv, &opts) + assertErr := func(err error) { require.NoError(t, err) } + c, handlerCalled := dial(t, &srv, assertErr) + res, err := c.GetObjectHeader(ctx, srv.cnr, srv.obj, srv.clientSigner, opts) + assertErr(err) + if 
!assert.ObjectsAreEqual(srv.header, res) { + // can be caused by gRPC service fields, binaries must still be equal + require.Equal(t, srv.header.Marshal(), res.Marshal()) + } + require.True(t, *handlerCalled) }) } }) + t.Run("fail", func(t *testing.T) { + t.Run("split info", func(t *testing.T) { + srv := srv + srv.raw = true + srv.retSplitInfo = true + assertErr := func(err error) { + var si object.SplitInfoError + require.ErrorAs(t, err, &si) + require.EqualValues(t, srv.splitInfo, si) + } + c, handlerCalled := dial(t, &srv, assertErr) + var opts GetObjectHeaderOptions + opts.PreventAssembly() + _, err := c.GetObjectHeader(ctx, srv.cnr, srv.obj, srv.clientSigner, opts) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("sign request", func(t *testing.T) { + srv := srv + srv.sleepDur = 0 + assertErr := func(err error) { require.ErrorContains(t, err, errSignRequest) } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetObjectHeader(ctx, srv.cnr, srv.obj, neofscryptotest.FailSigner(srv.clientSigner), GetObjectHeaderOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("transport", func(t *testing.T) { + srv := srv + srv.errTransport = errors.New("any transport failure") + assertErr := func(err error) { + require.ErrorContains(t, err, errTransport) + require.ErrorContains(t, err, "any transport failure") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetObjectHeader(ctx, srv.cnr, srv.obj, srv.clientSigner, GetObjectHeaderOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("invalid response signature", func(t *testing.T) { + for i, testCase := range []struct { + err string + corrupt func(*apiobject.HeadResponse) + }{ + {err: "missing verification header", + corrupt: func(r *apiobject.HeadResponse) { r.VerifyHeader = nil }, + }, + {err: "missing body signature", + corrupt: func(r *apiobject.HeadResponse) { r.VerifyHeader.BodySignature = nil }, + }, + {err: "missing signature of the 
meta header", + corrupt: func(r *apiobject.HeadResponse) { r.VerifyHeader.MetaSignature = nil }, + }, + {err: "missing signature of the origin verification header", + corrupt: func(r *apiobject.HeadResponse) { r.VerifyHeader.OriginSignature = nil }, + }, + {err: "verify body signature: missing public key", + corrupt: func(r *apiobject.HeadResponse) { r.VerifyHeader.BodySignature.Key = nil }, + }, + {err: "verify signature of the meta header: missing public key", + corrupt: func(r *apiobject.HeadResponse) { r.VerifyHeader.MetaSignature.Key = nil }, + }, + {err: "verify signature of the origin verification header: missing public key", + corrupt: func(r *apiobject.HeadResponse) { r.VerifyHeader.OriginSignature.Key = nil }, + }, + {err: "verify body signature: decode public key from binary", + corrupt: func(r *apiobject.HeadResponse) { + r.VerifyHeader.BodySignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the meta header: decode public key from binary", + corrupt: func(r *apiobject.HeadResponse) { + r.VerifyHeader.MetaSignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the origin verification header: decode public key from binary", + corrupt: func(r *apiobject.HeadResponse) { + r.VerifyHeader.OriginSignature.Key = []byte("not a public key") + }, + }, + {err: "verify body signature: invalid scheme -1", + corrupt: func(r *apiobject.HeadResponse) { r.VerifyHeader.BodySignature.Scheme = -1 }, + }, + {err: "verify body signature: unsupported scheme 3", + corrupt: func(r *apiobject.HeadResponse) { r.VerifyHeader.BodySignature.Scheme = 3 }, + }, + {err: "verify signature of the meta header: unsupported scheme 3", + corrupt: func(r *apiobject.HeadResponse) { r.VerifyHeader.MetaSignature.Scheme = 3 }, + }, + {err: "verify signature of the origin verification header: unsupported scheme 3", + corrupt: func(r *apiobject.HeadResponse) { r.VerifyHeader.OriginSignature.Scheme = 3 }, + }, + {err: "verify body signature: 
signature mismatch", + corrupt: func(r *apiobject.HeadResponse) { r.VerifyHeader.BodySignature.Sign[0]++ }, + }, + {err: "verify signature of the meta header: signature mismatch", + corrupt: func(r *apiobject.HeadResponse) { r.VerifyHeader.MetaSignature.Sign[0]++ }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apiobject.HeadResponse) { r.VerifyHeader.OriginSignature.Sign[0]++ }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apiobject.HeadResponse) { + r.VerifyHeader.BodySignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the meta header: signature mismatch", + corrupt: func(r *apiobject.HeadResponse) { + r.VerifyHeader.MetaSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apiobject.HeadResponse) { + r.VerifyHeader.OriginSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + } { + srv := srv + srv.corruptRespSig = testCase.corrupt + assertErr := func(err error) { + require.ErrorContains(t, err, errResponseSignature, [2]any{i, testCase}) + require.ErrorContains(t, err, testCase.err, [2]any{i, testCase}) + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetObjectHeader(ctx, srv.cnr, srv.obj, srv.clientSigner, GetObjectHeaderOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + } + }) + t.Run("invalid response status", func(t *testing.T) { + srv := srv + srv.modifyResp = func(r *apiobject.HeadResponse) { + r.MetaHeader.Status = &status.Status{Code: status.InternalServerError, Details: make([]*status.Status_Detail, 1)} + } + assertErr := func(err error) { + require.ErrorContains(t, err, errInvalidResponseStatus) + require.ErrorContains(t, err, "details attached but not supported") + } + c, handlerCalled 
:= dial(t, &srv, assertErr) + _, err := c.GetObjectHeader(ctx, srv.cnr, srv.obj, srv.clientSigner, GetObjectHeaderOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("status errors", func(t *testing.T) { + for _, testCase := range []struct { + code uint32 + errConst error + errVar any + }{ + {code: 1 << 32 / 2}, + {code: status.InternalServerError, errConst: apistatus.ErrServerInternal, errVar: new(apistatus.InternalServerError)}, + {code: status.SignatureVerificationFail, errConst: apistatus.ErrSignatureVerification, errVar: new(apistatus.SignatureVerificationFailure)}, + {code: status.ContainerNotFound, errConst: apistatus.ErrContainerNotFound, errVar: new(apistatus.ContainerNotFound)}, + {code: status.ObjectNotFound, errConst: apistatus.ErrObjectNotFound, errVar: new(apistatus.ObjectNotFound)}, + {code: status.ObjectAccessDenied, errConst: apistatus.ErrObjectAccessDenied, errVar: new(apistatus.ObjectAccessDenied)}, + {code: status.ObjectAlreadyRemoved, errConst: apistatus.ErrObjectAlreadyRemoved, errVar: new(apistatus.ObjectAlreadyRemoved)}, + {code: status.SessionTokenExpired, errConst: apistatus.ErrSessionTokenExpired, errVar: new(apistatus.SessionTokenExpired)}, + } { + srv := srv + srv.modifyResp = func(r *apiobject.HeadResponse) { + r.MetaHeader.Status = &status.Status{Code: testCase.code, Message: "any message"} + } + assertErr := func(err error) { + require.ErrorIs(t, err, apistatus.Error, testCase) + require.ErrorContains(t, err, "any message", testCase) + if testCase.errConst != nil { + require.ErrorIs(t, err, testCase.errConst, testCase) + } + if testCase.errVar != nil { + require.ErrorAs(t, err, testCase.errVar, testCase) + } + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetObjectHeader(ctx, srv.cnr, srv.obj, srv.clientSigner, GetObjectHeaderOptions{}) + assertErr(err) + require.True(t, *handlerCalled, testCase) + } + }) + t.Run("response body", func(t *testing.T) { + t.Run("missing", func(t *testing.T) { + 
srv := srv + assertErr := func(err error) { require.EqualError(t, err, "invalid response: missing body") } + c, handlerCalled := dial(t, &srv, assertErr) + srv.modifyResp = func(r *apiobject.HeadResponse) { r.Body = nil } + _, err := c.GetObjectHeader(ctx, srv.cnr, srv.obj, srv.clientSigner, GetObjectHeaderOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("invalid oneof field", func(t *testing.T) { + srv := srv + assertErr := func(err error) { + require.EqualError(t, err, "invalid response: invalid body: invalid field: unknown/invalid oneof field (*object.HeadResponse_Body_ShortHeader)") + } + c, handlerCalled := dial(t, &srv, assertErr) + srv.modifyResp = func(r *apiobject.HeadResponse) { r.Body.Head = new(apiobject.HeadResponse_Body_ShortHeader) } + _, err := c.GetObjectHeader(ctx, srv.cnr, srv.obj, srv.clientSigner, GetObjectHeaderOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("missing header", func(t *testing.T) { + srv := srv + assertErr := func(err error) { + require.EqualError(t, err, "invalid response: invalid body: missing required field (header)") + } + c, handlerCalled := dial(t, &srv, assertErr) + srv.modifyResp = func(r *apiobject.HeadResponse) { r.Body.Head = new(apiobject.HeadResponse_Body_Header) } + _, err := c.GetObjectHeader(ctx, srv.cnr, srv.obj, srv.clientSigner, GetObjectHeaderOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + srv.modifyResp = func(r *apiobject.HeadResponse) { + r.Body.Head = &apiobject.HeadResponse_Body_Header{Header: new(apiobject.HeaderWithSignature)} + } + _, err = c.GetObjectHeader(ctx, srv.cnr, srv.obj, srv.clientSigner, GetObjectHeaderOptions{}) + assertErr(err) + }) + t.Run("invalid header", func(t *testing.T) { + srv := srv + for _, testCase := range invalidObjectHeaderTestCases { + srv.modifyResp = func(r *apiobject.HeadResponse) { + testCase.corrupt(r.Body.Head.(*apiobject.HeadResponse_Body_Header).Header.Header) + } + assertErr := func(err error) { + 
if err.Error() != testCase.err { + require.ErrorContains(t, err, fmt.Sprintf("invalid response: invalid body: invalid field (header): %s", testCase.err)) + } + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetObjectHeader(ctx, srv.cnr, srv.obj, srv.clientSigner, GetObjectHeaderOptions{}) + assertErr(err) + require.True(t, *handlerCalled, testCase) + } + for _, testCase := range invalidObjectHeaderTestCases { + srv.modifyResp = func(r *apiobject.HeadResponse) { + testCase.corrupt(r.Body.Head.(*apiobject.HeadResponse_Body_Header).Header.Header.Split.ParentHeader) + } + assertErr := func(err error) { + if err.Error() != testCase.err { + require.ErrorContains(t, err, fmt.Sprintf("invalid response: invalid body: invalid field (header): invalid parent header: %s", testCase.err)) + } + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.GetObjectHeader(ctx, srv.cnr, srv.obj, srv.clientSigner, GetObjectHeaderOptions{}) + assertErr(err) + require.True(t, *handlerCalled, testCase) + } + }) + t.Run("unexpected split info", func(t *testing.T) { + srv := srv + assertErr := func(err error) { + require.EqualError(t, err, "invalid response: invalid body: server responded with split info which was not requested") + } + c, handlerCalled := dial(t, &srv, assertErr) + srv.modifyResp = func(r *apiobject.HeadResponse) { r.Body.Head = new(apiobject.HeadResponse_Body_SplitInfo) } + _, err := c.GetObjectHeader(ctx, srv.cnr, srv.obj, srv.clientSigner, GetObjectHeaderOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("invalid split info", func(t *testing.T) { + for _, testCase := range []struct { + err string + corrupt func(*apiobject.SplitInfo) + }{ + {err: "invalid split ID length 15", corrupt: func(i *apiobject.SplitInfo) { i.SplitId = make([]byte, 15) }}, + {err: "both linking and last split-chain elements are missing", corrupt: func(i *apiobject.SplitInfo) { + i.LastPart, i.Link = nil, nil + }}, + {err: "invalid last split-chain 
element: missing value field", corrupt: func(i *apiobject.SplitInfo) { i.LastPart.Value = nil }}, + {err: "invalid last split-chain element: invalid value length 31", corrupt: func(i *apiobject.SplitInfo) { i.LastPart.Value = make([]byte, 31) }}, + {err: "invalid linking split-chain element: missing value field", corrupt: func(i *apiobject.SplitInfo) { i.Link.Value = nil }}, + {err: "invalid linking split-chain element: invalid value length 31", corrupt: func(i *apiobject.SplitInfo) { i.Link.Value = make([]byte, 31) }}, + {err: "invalid first split-chain element: missing value field", corrupt: func(i *apiobject.SplitInfo) { i.FirstPart.Value = nil }}, + {err: "invalid first split-chain element: invalid value length 31", corrupt: func(i *apiobject.SplitInfo) { i.FirstPart.Value = make([]byte, 31) }}, + } { + srv := srv + srv.raw = true + srv.retSplitInfo = true + srv.modifyResp = func(r *apiobject.HeadResponse) { + testCase.corrupt(r.Body.Head.(*apiobject.HeadResponse_Body_SplitInfo).SplitInfo) + } + assertErr := func(err error) { + require.EqualError(t, err, fmt.Sprintf("invalid response: invalid body: invalid field (split info): %s", testCase.err)) + } + c, handlerCalled := dial(t, &srv, assertErr) + var opts GetObjectHeaderOptions + opts.PreventAssembly() + _, err := c.GetObjectHeader(ctx, srv.cnr, srv.obj, srv.clientSigner, opts) + assertErr(err) + require.True(t, *handlerCalled, testCase) + } + }) + }) + }) + t.Run("response info handler", func(t *testing.T) { + t.Run("OK", func(t *testing.T) { + srv := srv + srv.epoch = 3598503 + assertErr := func(err error) { require.NoError(t, err) } + respHandlerCalled := false + c, reqHandlerCalled := _dial(t, &srv, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + respHandlerCalled = true + require.EqualValues(t, 3598503, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return nil + }) + }) + _, err := 
c.GetObjectHeader(ctx, srv.cnr, srv.obj, srv.clientSigner, GetObjectHeaderOptions{}) + assertErr(err) + require.True(t, respHandlerCalled) + require.True(t, *reqHandlerCalled) + }) + t.Run("fail", func(t *testing.T) { + srv := srv + srv.epoch = 4386380643 + assertErr := func(err error) { require.ErrorContains(t, err, "intercept response info: some handler error") } + respHandlerCalled := false + c, reqHandlerCalled := _dial(t, &srv, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + if !respHandlerCalled { // dial + respHandlerCalled = true + return nil + } + require.EqualValues(t, 4386380643, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return errors.New("some handler error") + }) + }) + _, err := c.GetObjectHeader(ctx, srv.cnr, srv.obj, srv.clientSigner, GetObjectHeaderOptions{}) + assertErr(err) + require.True(t, respHandlerCalled) + require.True(t, *reqHandlerCalled) + }) + }) } diff --git a/client/object_hash.go b/client/object_hash.go index 9c0cd19df..b2c9e30f2 100644 --- a/client/object_hash.go +++ b/client/object_hash.go @@ -2,173 +2,192 @@ package client import ( "context" + "errors" "fmt" + "time" - "github.com/nspcc-dev/neofs-api-go/v2/acl" - v2object "github.com/nspcc-dev/neofs-api-go/v2/object" - v2refs "github.com/nspcc-dev/neofs-api-go/v2/refs" - rpcapi "github.com/nspcc-dev/neofs-api-go/v2/rpc" - "github.com/nspcc-dev/neofs-api-go/v2/rpc/client" + apiacl "github.com/nspcc-dev/neofs-sdk-go/api/acl" + apiobject "github.com/nspcc-dev/neofs-sdk-go/api/object" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + apisession "github.com/nspcc-dev/neofs-sdk-go/api/session" "github.com/nspcc-dev/neofs-sdk-go/bearer" + "github.com/nspcc-dev/neofs-sdk-go/checksum" + apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" cid "github.com/nspcc-dev/neofs-sdk-go/container/id" + neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" + 
"github.com/nspcc-dev/neofs-sdk-go/object" oid "github.com/nspcc-dev/neofs-sdk-go/object/id" + "github.com/nspcc-dev/neofs-sdk-go/session" "github.com/nspcc-dev/neofs-sdk-go/stat" - "github.com/nspcc-dev/neofs-sdk-go/user" ) -var ( - // special variable for test purposes only, to overwrite real RPC calls. - rpcAPIHashObjectRange = rpcapi.HashObjectRange -) +// HashObjectPayloadRangesOptions groups optional parameters of +// [Client.HashObjectPayloadRanges]. +type HashObjectPayloadRangesOptions struct { + local bool -// PrmObjectHash groups parameters of ObjectHash operation. -type PrmObjectHash struct { - sessionContainer + sessionSet bool + session session.Object - body v2object.GetRangeHashRequestBody + bearerTokenSet bool + bearerToken bearer.Token - csAlgo v2refs.ChecksumType + salt []byte } -// MarkLocal tells the server to execute the operation locally. -func (x *PrmObjectHash) MarkLocal() { - x.meta.SetTTL(1) +// PreventForwarding disables request forwarding to container nodes and +// instructs the server to hash object payload stored locally. +func (x *HashObjectPayloadRangesOptions) PreventForwarding() { + x.local = true } -// WithBearerToken attaches bearer token to be used for the operation. +// WithinSession specifies token of the session preliminary issued by some user +// with the client signer. Session must include [session.VerbObjectRangeHash] +// action. The token must be signed and target the subject authenticated by +// signer passed to [Client.HashObjectPayloadRanges]. If set, the session issuer +// will be treated as the original request sender. // -// If set, underlying eACL rules will be used in access control. +// Note that sessions affect access control only indirectly: they just replace +// request originator. // -// Must be signed. 
-func (x *PrmObjectHash) WithBearerToken(t bearer.Token) { - var v2token acl.BearerToken - t.WriteToV2(&v2token) - x.meta.SetBearerToken(&v2token) -} - -// SetRangeList sets list of ranges in (offset, length) pair format. -// Required parameter. -// -// If passed as slice, then it must not be mutated before the operation completes. -func (x *PrmObjectHash) SetRangeList(r ...uint64) { - ln := len(r) - if ln%2 != 0 { - panic("odd number of range parameters") - } - - rs := make([]v2object.Range, ln/2) - - for i := 0; i < ln/2; i++ { - rs[i].SetOffset(r[2*i]) - rs[i].SetLength(r[2*i+1]) - } - - x.body.SetRanges(rs) -} - -// TillichZemorAlgo changes the hash function to Tillich-Zemor -// (https://link.springer.com/content/pdf/10.1007/3-540-48658-5_5.pdf). +// With session, [Client.HashObjectPayloadRanges] can also return +// [apistatus.ErrSessionTokenExpired] if the token has expired: this usually +// requires re-issuing the session. // -// By default, SHA256 hash function is used. -func (x *PrmObjectHash) TillichZemorAlgo() { - x.csAlgo = v2refs.TillichZemor +// Note that it makes no sense to start session with the server via +// [Client.StartSession] like for [Client.DeleteObject] or [Client.PutObject]. +func (x *HashObjectPayloadRangesOptions) WithinSession(s session.Object) { + x.session, x.sessionSet = s, true } -// UseSalt sets the salt to XOR the data range before hashing. -// -// Must not be mutated before the operation completes. -func (x *PrmObjectHash) UseSalt(salt []byte) { - x.body.SetSalt(salt) +// WithBearerToken attaches bearer token carrying extended ACL rules that +// replace eACL of the object's container. The token must be issued by the +// container owner and target the subject authenticated by signer passed to +// [Client.HashObjectPayloadRanges]. In practice, bearer token makes sense only +// if it grants hashing rights to the subject. 
+func (x *HashObjectPayloadRangesOptions) WithBearerToken(t bearer.Token) { + x.bearerToken, x.bearerTokenSet = t, true } -// WithXHeaders specifies list of extended headers (string key-value pairs) -// to be attached to the request. Must have an even length. -// -// Slice must not be mutated until the operation completes. -func (x *PrmObjectHash) WithXHeaders(hs ...string) { - writeXHeadersToMeta(hs, &x.meta) +// WithSalt attaches salt to XOR the object's payload range before hashing. +func (x *HashObjectPayloadRangesOptions) WithSalt(salt []byte) { + x.salt = salt } -// ObjectHash requests checksum of the range list of the object payload using -// NeoFS API protocol. -// -// Returns a list of checksums in raw form: the format of hashes and their number -// is left for the caller to check. Client preserves the order of the server's response. +// HashObjectPayloadRanges requests checksum of the referenced object's payload +// ranges. Checksum type must not be zero, range set must not be empty and +// contain zero-length element. Returns a list of checksums in raw form: the +// format of hashes and their number is left for the caller to check. Client +// preserves the order of the server's response. // -// Exactly one return value is non-nil. By default, server status is returned in res structure. -// Any client's internal or transport errors are returned as `error`, -// see [apistatus] package for NeoFS-specific error types. +// When only object payload's checksums are needed, HashObjectPayloadRanges +// should be used instead of hashing the [Client.GetObjectPayloadRange] or +// [Client.GetObject] result as much more efficient. // -// Context is required and must not be nil. It is used for network communication. -// -// Signer is required and must not be nil. The operation is executed on behalf of the account corresponding to -// the specified Signer, which is taken into account, in particular, for access control. 
-// -// Return errors: -// - [ErrMissingRanges] -// - [ErrMissingSigner] -func (c *Client) ObjectHash(ctx context.Context, containerID cid.ID, objectID oid.ID, signer user.Signer, prm PrmObjectHash) ([][]byte, error) { - var ( - addr v2refs.Address - cidV2 v2refs.ContainerID - oidV2 v2refs.ObjectID - err error - ) - - defer func() { - c.sendStatistic(stat.MethodObjectHash, err)() - }() - - if len(prm.body.GetRanges()) == 0 { - err = ErrMissingRanges - return nil, err +// HashObjectPayloadRanges returns: +// - [apistatus.ErrContainerNotFound] if referenced container is missing +// - [apistatus.ErrObjectNotFound] if referenced object is missing +// - [apistatus.ErrObjectAccessDenied] if signer has no access to hash the payload +// - [apistatus.ErrObjectOutOfRange] if at least one range is out of bounds +func (c *Client) HashObjectPayloadRanges(ctx context.Context, cnr cid.ID, obj oid.ID, typ checksum.Type, signer neofscrypto.Signer, + opts HashObjectPayloadRangesOptions, ranges []object.Range) ([][]byte, error) { + if signer == nil { + return nil, errMissingSigner + } else if typ == 0 { + return nil, errors.New("zero checksum type") + } else if len(ranges) == 0 { + return nil, errors.New("missing ranges") + } + for i := range ranges { + if ranges[i].Length == 0 { + return nil, fmt.Errorf("zero length of range #%d", i) + } } - containerID.WriteToV2(&cidV2) - addr.SetContainerID(&cidV2) - - objectID.WriteToV2(&oidV2) - addr.SetObjectID(&oidV2) - - if signer == nil { - return nil, ErrMissingSigner + var err error + if c.handleAPIOpResult != nil { + defer func(start time.Time) { + c.handleAPIOpResult(c.serverPubKey, c.endpoint, stat.MethodObjectHash, time.Since(start), err) + }(time.Now()) } - prm.body.SetAddress(&addr) - if prm.csAlgo == v2refs.UnknownChecksum { - prm.body.SetType(v2refs.SHA256) + // form request + req := &apiobject.GetRangeHashRequest{ + Body: &apiobject.GetRangeHashRequest_Body{ + Address: &refs.Address{ + ContainerId: new(refs.ContainerID), + 
ObjectId: new(refs.ObjectID), + }, + Ranges: make([]*apiobject.Range, len(ranges)), + Salt: opts.salt, + Type: refs.ChecksumType(typ), + }, + MetaHeader: new(apisession.RequestMetaHeader), + } + cnr.WriteToV2(req.Body.Address.ContainerId) + obj.WriteToV2(req.Body.Address.ObjectId) + for i := range ranges { + req.Body.Ranges[i] = &apiobject.Range{Offset: ranges[i].Offset, Length: ranges[i].Length} + } + if opts.sessionSet { + req.MetaHeader.SessionToken = new(apisession.SessionToken) + opts.session.WriteToV2(req.MetaHeader.SessionToken) + } + if opts.bearerTokenSet { + req.MetaHeader.BearerToken = new(apiacl.BearerToken) + opts.bearerToken.WriteToV2(req.MetaHeader.BearerToken) + } + if opts.local { + req.MetaHeader.Ttl = 1 } else { - prm.body.SetType(prm.csAlgo) + req.MetaHeader.Ttl = 2 + } + // FIXME: balance requests need small fixed-size buffers for encoding, its makes + // no sense to mosh them with other buffers + buf := c.signBuffers.Get().(*[]byte) + defer c.signBuffers.Put(buf) + if req.VerifyHeader, err = neofscrypto.SignRequest(signer, req, req.Body, *buf); err != nil { + err = fmt.Errorf("%s: %w", errSignRequest, err) // for closure above + return nil, err } - var req v2object.GetRangeHashRequest - c.prepareRequest(&req, &prm.meta) - req.SetBody(&prm.body) - - buf := c.buffers.Get().(*[]byte) - err = signServiceMessage(signer, &req, *buf) - c.buffers.Put(buf) + // send request + resp, err := c.transport.object.GetRangeHash(ctx, req) if err != nil { - err = fmt.Errorf("sign request: %w", err) + err = fmt.Errorf("%s: %w", errTransport, err) // for closure above return nil, err } - resp, err := rpcAPIHashObjectRange(&c.c, &req, client.WithContext(ctx)) + // intercept response info + if c.interceptAPIRespInfo != nil { + if err = c.interceptAPIRespInfo(ResponseMetaInfo{ + key: resp.GetVerifyHeader().GetBodySignature().GetKey(), + epoch: resp.GetMetaHeader().GetEpoch(), + }); err != nil { + err = fmt.Errorf("%s: %w", errInterceptResponseInfo, err) // for 
closure above + return nil, err + } + } + + // verify response integrity + if err = neofscrypto.VerifyResponse(resp, resp.Body); err != nil { + err = fmt.Errorf("%s: %w", errResponseSignature, err) // for closure above + return nil, err + } + sts, err := apistatus.ErrorFromV2(resp.GetMetaHeader().GetStatus()) if err != nil { - err = fmt.Errorf("write request: %w", err) + err = fmt.Errorf("%s: %w", errInvalidResponseStatus, err) // for closure above return nil, err } - - var res [][]byte - if err = c.processResponse(resp); err != nil { + if sts != nil { + err = sts // for closure above return nil, err } - res = resp.GetBody().GetHashList() - if len(res) == 0 { - err = newErrMissingResponseField("hash list") + // decode response payload + if resp.Body == nil { + err = errors.New(errMissingResponseBody) // for closure above return nil, err } - - return res, nil + return resp.Body.HashList, nil } diff --git a/client/object_hash_test.go b/client/object_hash_test.go index d2ebd7042..5c3fad79d 100644 --- a/client/object_hash_test.go +++ b/client/object_hash_test.go @@ -1,26 +1,494 @@ package client import ( + "bytes" "context" + "errors" + "fmt" + "math/rand" + "net" "testing" + "time" - v2object "github.com/nspcc-dev/neofs-api-go/v2/object" + apinetmap "github.com/nspcc-dev/neofs-sdk-go/api/netmap" + apiobject "github.com/nspcc-dev/neofs-sdk-go/api/object" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + apisession "github.com/nspcc-dev/neofs-sdk-go/api/session" + "github.com/nspcc-dev/neofs-sdk-go/api/status" + "github.com/nspcc-dev/neofs-sdk-go/bearer" + bearertest "github.com/nspcc-dev/neofs-sdk-go/bearer/test" + "github.com/nspcc-dev/neofs-sdk-go/checksum" + apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" cid "github.com/nspcc-dev/neofs-sdk-go/container/id" + cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" + neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" + neofscryptotest "github.com/nspcc-dev/neofs-sdk-go/crypto/test" + netmaptest 
"github.com/nspcc-dev/neofs-sdk-go/netmap/test" + "github.com/nspcc-dev/neofs-sdk-go/object" oid "github.com/nspcc-dev/neofs-sdk-go/object/id" + oidtest "github.com/nspcc-dev/neofs-sdk-go/object/id/test" + "github.com/nspcc-dev/neofs-sdk-go/session" + sessiontest "github.com/nspcc-dev/neofs-sdk-go/session/test" + "github.com/nspcc-dev/neofs-sdk-go/stat" + usertest "github.com/nspcc-dev/neofs-sdk-go/user/test" + versiontest "github.com/nspcc-dev/neofs-sdk-go/version/test" "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/test/bufconn" ) -func TestClient_ObjectHash(t *testing.T) { - c := newClient(t, nil) +type hashObjectPayloadRangesServer struct { + noOtherObjectCalls + // client + cnr cid.ID + obj oid.ID + clientSigner neofscrypto.Signer + typ checksum.Type + ranges []object.Range + salt []byte + local bool + session *session.Object + bearerToken *bearer.Token + // server + sleepDur time.Duration + endpointInfoOnDialServer + hashes [][]byte + errTransport error + modifyResp func(*apiobject.GetRangeHashResponse) + corruptRespSig func(*apiobject.GetRangeHashResponse) +} - t.Run("missing signer", func(t *testing.T) { - var reqBody v2object.GetRangeHashRequestBody - reqBody.SetRanges(make([]v2object.Range, 1)) +func (x hashObjectPayloadRangesServer) GetRangeHash(ctx context.Context, req *apiobject.GetRangeHashRequest) (*apiobject.GetRangeHashResponse, error) { + if x.sleepDur > 0 { + time.Sleep(x.sleepDur) + } + if x.errTransport != nil { + return nil, x.errTransport + } + var sts status.Status + resp := apiobject.GetRangeHashResponse{ + MetaHeader: &apisession.ResponseMetaHeader{Status: &sts, Epoch: x.epoch}, + } + var err error + var cnr cid.ID + var obj oid.ID + sigScheme := refs.SignatureScheme(x.clientSigner.Scheme()) + creatorPubKey := neofscrypto.PublicKeyBytes(x.clientSigner.Public()) + if ctx == nil { + sts.Code, sts.Message = status.InternalServerError, "nil context" + } else if req == nil { + sts.Code, sts.Message = 
status.InternalServerError, "nil request" + } else if err = neofscrypto.VerifyRequest(req, req.Body); err != nil { + sts.Code, sts.Message = status.SignatureVerificationFail, err.Error() + } else if req.VerifyHeader.BodySignature.Scheme != sigScheme || + !bytes.Equal(req.VerifyHeader.BodySignature.Key, creatorPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request body signature credentials" + } else if req.VerifyHeader.MetaSignature.Scheme != sigScheme || + !bytes.Equal(req.VerifyHeader.MetaSignature.Key, creatorPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request meta header signature credentials" + } else if req.VerifyHeader.OriginSignature.Scheme != sigScheme || + !bytes.Equal(req.VerifyHeader.OriginSignature.Key, creatorPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected origin request verification header signature credentials" + } else if req.MetaHeader == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: missing meta header" + } else if x.local && req.MetaHeader.Ttl != 1 || !x.local && req.MetaHeader.Ttl != 2 { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request: invalid meta header: invalid TTL %d", req.MetaHeader.Ttl) + } else if req.Body == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: missing body" + } else if req.Body.Address == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: invalid body: missing address" + } else if req.Body.Address.ObjectId == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: invalid body: invalid address: missing ID" + } else if err = obj.ReadFromV2(req.Body.Address.ObjectId); err != nil { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request: invalid body: invalid address: invalid ID: %s", err) + } else if obj != x.obj { + sts.Code, sts.Message = 
status.InternalServerError, "[test] wrong ID" + } else if req.Body.Address.ContainerId == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: invalid body: invalid address: missing container" + } else if err = cnr.ReadFromV2(req.Body.Address.ContainerId); err != nil { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request: invalid body: invalid address: invalid container: %s", err) + } else if cnr != x.cnr { + sts.Code, sts.Message = status.InternalServerError, "[test] wrong container" + } else if !bytes.Equal(req.Body.Salt, x.salt) { + sts.Code, sts.Message = status.InternalServerError, "[test] wrong salt" + } else if req.Body.Type != refs.ChecksumType(x.typ) { + sts.Code, sts.Message = status.InternalServerError, "[test] wrong checksum type" + } else if len(req.Body.Ranges) == 0 { + sts.Code, sts.Message = status.InternalServerError, "invalid request: invalid body: missing ranges" + } else if len(req.Body.Ranges) != len(x.ranges) { + sts.Code, sts.Message = status.InternalServerError, "[test] wrong number of ranges" + } else { + for i := range req.Body.Ranges { + if req.Body.Ranges[i] == nil { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request: invalid body: nil range #%d", i) + } else if req.Body.Ranges[i].Length != x.ranges[i].Length || req.Body.Ranges[i].Offset != x.ranges[i].Offset { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("[test] wrong range #%d", i) + } + } + } + if sts.Code == 0 && x.session != nil { + var so session.Object + if req.MetaHeader.SessionToken == nil { + sts.Code, sts.Message = status.InternalServerError, "[test] missing session token" + } else if err = so.ReadFromV2(req.MetaHeader.SessionToken); err != nil { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request: invalid meta header: invalid session token: %v", err) + } else if !bytes.Equal(so.Marshal(), x.session.Marshal()) { + sts.Code, sts.Message = 
status.InternalServerError, "[test] session token in request differs with the input one" + } + } + if sts.Code == 0 && x.bearerToken != nil { + var bt bearer.Token + if req.MetaHeader.BearerToken == nil { + sts.Code, sts.Message = status.InternalServerError, "[test] missing bearer token" + } else if err = bt.ReadFromV2(req.MetaHeader.BearerToken); err != nil { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request: invalid meta header: invalid bearer token: %v", err) + } else if !bytes.Equal(bt.Marshal(), x.bearerToken.Marshal()) { + sts.Code, sts.Message = status.InternalServerError, "[test] bearer token in request differs with the input one" + } + } + if sts.Code == 0 { + resp.MetaHeader.Status = nil + resp.Body = &apiobject.GetRangeHashResponse_Body{HashList: x.hashes} + } + if x.modifyResp != nil { + x.modifyResp(&resp) + } + resp.VerifyHeader, err = neofscrypto.SignResponse(x.serverSigner, &resp, resp.Body, nil) + if err != nil { + return nil, fmt.Errorf("sign response: %w", err) + } + if x.corruptRespSig != nil { + x.corruptRespSig(&resp) + } + return &resp, nil +} - _, err := c.ObjectHash(context.Background(), cid.ID{}, oid.ID{}, nil, PrmObjectHash{ - body: reqBody, +func TestClient_HashObjectPayloadRanges(t *testing.T) { + ctx := context.Background() + var srv hashObjectPayloadRangesServer + srv.sleepDur = 10 * time.Millisecond + srv.serverSigner = neofscryptotest.RandomSigner() + srv.latestVersion = versiontest.Version() + srv.nodeInfo = netmaptest.NodeInfo() + srv.nodeInfo.SetPublicKey(neofscrypto.PublicKeyBytes(srv.serverSigner.Public())) + usr, _ := usertest.TwoUsers() + srv.clientSigner = usr + srv.cnr = cidtest.ID() + srv.obj = oidtest.ID() + srv.typ = checksum.Type(rand.Uint32() % 256) + if srv.typ == 0 { + srv.typ++ + } + srv.ranges = []object.Range{{1, 2}, {3, 4}} + srv.hashes = [][]byte{[]byte("hello"), []byte("world")} + _dial := func(t testing.TB, srv *hashObjectPayloadRangesServer, assertErr func(error), 
customizeOpts func(*Options)) (*Client, *bool) { + var opts Options + var handlerCalled bool + opts.SetAPIRequestResultHandler(func(nodeKey []byte, endpoint string, op stat.Method, dur time.Duration, err error) { + handlerCalled = true + require.Equal(t, srv.nodeInfo.PublicKey(), nodeKey) + require.Equal(t, "localhost:8080", endpoint) + require.Equal(t, stat.MethodObjectHash, op) + require.Greater(t, dur, srv.sleepDur) + assertErr(err) }) + if customizeOpts != nil { + customizeOpts(&opts) + } + + c, err := New(anyValidURI, opts) + require.NoError(t, err) + + conn := bufconn.Listen(10 << 10) + gs := grpc.NewServer() + apinetmap.RegisterNetmapServiceServer(gs, srv) + apiobject.RegisterObjectServiceServer(gs, srv) + go func() { _ = gs.Serve(conn) }() + t.Cleanup(gs.Stop) + + c.dial = func(ctx context.Context, _ string) (net.Conn, error) { return conn.DialContext(ctx) } + require.NoError(t, c.Dial(ctx)) - require.ErrorIs(t, err, ErrMissingSigner) + return c, &handlerCalled + } + dial := func(t testing.TB, srv *hashObjectPayloadRangesServer, assertErr func(error)) (*Client, *bool) { + return _dial(t, srv, assertErr, nil) + } + t.Run("invalid signer", func(t *testing.T) { + c, err := New(anyValidURI, Options{}) + require.NoError(t, err) + _, err = c.HashObjectPayloadRanges(ctx, srv.cnr, srv.obj, srv.typ, nil, HashObjectPayloadRangesOptions{}, srv.ranges) + require.ErrorIs(t, err, errMissingSigner) + }) + t.Run("invalid checksum type", func(t *testing.T) { + c, err := New(anyValidURI, Options{}) + require.NoError(t, err) + _, err = c.HashObjectPayloadRanges(ctx, srv.cnr, srv.obj, 0, srv.clientSigner, HashObjectPayloadRangesOptions{}, srv.ranges) + require.EqualError(t, err, "zero checksum type") + }) + t.Run("invalid ranges", func(t *testing.T) { + c, err := New(anyValidURI, Options{}) + require.NoError(t, err) + _, err = c.HashObjectPayloadRanges(ctx, srv.cnr, srv.obj, srv.typ, srv.clientSigner, HashObjectPayloadRangesOptions{}, nil) + require.EqualError(t, err, "missing 
ranges") + _, err = c.HashObjectPayloadRanges(ctx, srv.cnr, srv.obj, srv.typ, srv.clientSigner, HashObjectPayloadRangesOptions{}, []object.Range{}) + require.EqualError(t, err, "missing ranges") + _, err = c.HashObjectPayloadRanges(ctx, srv.cnr, srv.obj, srv.typ, srv.clientSigner, HashObjectPayloadRangesOptions{}, []object.Range{ + {1, 2}, {3, 0}, + }) + require.EqualError(t, err, "zero length of range #1") + }) + t.Run("OK", func(t *testing.T) { + for _, testCase := range []struct { + name string + setOpts func(srv *hashObjectPayloadRangesServer, opts *HashObjectPayloadRangesOptions) + }{ + {name: "default", setOpts: func(srv *hashObjectPayloadRangesServer, opts *HashObjectPayloadRangesOptions) {}}, + {name: "with session", setOpts: func(srv *hashObjectPayloadRangesServer, opts *HashObjectPayloadRangesOptions) { + so := sessiontest.Object() + opts.WithinSession(so) + srv.session = &so + }}, + {name: "with bearer token", setOpts: func(srv *hashObjectPayloadRangesServer, opts *HashObjectPayloadRangesOptions) { + bt := bearertest.Token() + opts.WithBearerToken(bt) + srv.bearerToken = &bt + }}, + {name: "no forwarding", setOpts: func(srv *hashObjectPayloadRangesServer, opts *HashObjectPayloadRangesOptions) { + srv.local = true + opts.PreventForwarding() + }}, + {name: "with salt", setOpts: func(srv *hashObjectPayloadRangesServer, opts *HashObjectPayloadRangesOptions) { + srv.salt = []byte("any_salt") + opts.WithSalt(srv.salt) + }}, + } { + t.Run(testCase.name, func(t *testing.T) { + srv := srv + var opts HashObjectPayloadRangesOptions + testCase.setOpts(&srv, &opts) + assertErr := func(err error) { require.NoError(t, err) } + c, handlerCalled := dial(t, &srv, assertErr) + res, err := c.HashObjectPayloadRanges(ctx, srv.cnr, srv.obj, srv.typ, srv.clientSigner, opts, srv.ranges) + assertErr(err) + require.Equal(t, srv.hashes, res) + require.True(t, *handlerCalled) + }) + } + }) + t.Run("fail", func(t *testing.T) { + t.Run("sign request", func(t *testing.T) { + srv := srv 
+ srv.sleepDur = 0 + assertErr := func(err error) { require.ErrorContains(t, err, errSignRequest) } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.HashObjectPayloadRanges(ctx, srv.cnr, srv.obj, srv.typ, neofscryptotest.FailSigner(srv.clientSigner), + HashObjectPayloadRangesOptions{}, srv.ranges) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("transport", func(t *testing.T) { + srv := srv + srv.errTransport = errors.New("any transport failure") + assertErr := func(err error) { + require.ErrorContains(t, err, errTransport) + require.ErrorContains(t, err, "any transport failure") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.HashObjectPayloadRanges(ctx, srv.cnr, srv.obj, srv.typ, srv.clientSigner, HashObjectPayloadRangesOptions{}, srv.ranges) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("invalid response signature", func(t *testing.T) { + for i, testCase := range []struct { + err string + corrupt func(*apiobject.GetRangeHashResponse) + }{ + {err: "missing verification header", + corrupt: func(r *apiobject.GetRangeHashResponse) { r.VerifyHeader = nil }, + }, + {err: "missing body signature", + corrupt: func(r *apiobject.GetRangeHashResponse) { r.VerifyHeader.BodySignature = nil }, + }, + {err: "missing signature of the meta header", + corrupt: func(r *apiobject.GetRangeHashResponse) { r.VerifyHeader.MetaSignature = nil }, + }, + {err: "missing signature of the origin verification header", + corrupt: func(r *apiobject.GetRangeHashResponse) { r.VerifyHeader.OriginSignature = nil }, + }, + {err: "verify body signature: missing public key", + corrupt: func(r *apiobject.GetRangeHashResponse) { r.VerifyHeader.BodySignature.Key = nil }, + }, + {err: "verify signature of the meta header: missing public key", + corrupt: func(r *apiobject.GetRangeHashResponse) { r.VerifyHeader.MetaSignature.Key = nil }, + }, + {err: "verify signature of the origin verification header: missing public key", + corrupt: func(r 
*apiobject.GetRangeHashResponse) { r.VerifyHeader.OriginSignature.Key = nil }, + }, + {err: "verify body signature: decode public key from binary", + corrupt: func(r *apiobject.GetRangeHashResponse) { + r.VerifyHeader.BodySignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the meta header: decode public key from binary", + corrupt: func(r *apiobject.GetRangeHashResponse) { + r.VerifyHeader.MetaSignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the origin verification header: decode public key from binary", + corrupt: func(r *apiobject.GetRangeHashResponse) { + r.VerifyHeader.OriginSignature.Key = []byte("not a public key") + }, + }, + {err: "verify body signature: invalid scheme -1", + corrupt: func(r *apiobject.GetRangeHashResponse) { r.VerifyHeader.BodySignature.Scheme = -1 }, + }, + {err: "verify body signature: unsupported scheme 3", + corrupt: func(r *apiobject.GetRangeHashResponse) { r.VerifyHeader.BodySignature.Scheme = 3 }, + }, + {err: "verify signature of the meta header: unsupported scheme 3", + corrupt: func(r *apiobject.GetRangeHashResponse) { r.VerifyHeader.MetaSignature.Scheme = 3 }, + }, + {err: "verify signature of the origin verification header: unsupported scheme 3", + corrupt: func(r *apiobject.GetRangeHashResponse) { r.VerifyHeader.OriginSignature.Scheme = 3 }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apiobject.GetRangeHashResponse) { r.VerifyHeader.BodySignature.Sign[0]++ }, + }, + {err: "verify signature of the meta header: signature mismatch", + corrupt: func(r *apiobject.GetRangeHashResponse) { r.VerifyHeader.MetaSignature.Sign[0]++ }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apiobject.GetRangeHashResponse) { r.VerifyHeader.OriginSignature.Sign[0]++ }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apiobject.GetRangeHashResponse) { + 
r.VerifyHeader.BodySignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the meta header: signature mismatch", + corrupt: func(r *apiobject.GetRangeHashResponse) { + r.VerifyHeader.MetaSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apiobject.GetRangeHashResponse) { + r.VerifyHeader.OriginSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + } { + srv := srv + srv.corruptRespSig = testCase.corrupt + assertErr := func(err error) { + require.ErrorContains(t, err, errResponseSignature, [2]any{i, testCase}) + require.ErrorContains(t, err, testCase.err, [2]any{i, testCase}) + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.HashObjectPayloadRanges(ctx, srv.cnr, srv.obj, srv.typ, srv.clientSigner, HashObjectPayloadRangesOptions{}, srv.ranges) + assertErr(err) + require.True(t, *handlerCalled) + } + }) + t.Run("invalid response status", func(t *testing.T) { + srv := srv + srv.modifyResp = func(r *apiobject.GetRangeHashResponse) { + r.MetaHeader.Status = &status.Status{Code: status.InternalServerError, Details: make([]*status.Status_Detail, 1)} + } + assertErr := func(err error) { + require.ErrorContains(t, err, errInvalidResponseStatus) + require.ErrorContains(t, err, "details attached but not supported") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.HashObjectPayloadRanges(ctx, srv.cnr, srv.obj, srv.typ, srv.clientSigner, HashObjectPayloadRangesOptions{}, srv.ranges) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("status errors", func(t *testing.T) { + for _, testCase := range []struct { + code uint32 + errConst error + errVar any + }{ + {code: 1 << 32 / 2}, + {code: status.InternalServerError, errConst: apistatus.ErrServerInternal, errVar: 
new(apistatus.InternalServerError)}, + {code: status.SignatureVerificationFail, errConst: apistatus.ErrSignatureVerification, errVar: new(apistatus.SignatureVerificationFailure)}, + {code: status.ContainerNotFound, errConst: apistatus.ErrContainerNotFound, errVar: new(apistatus.ContainerNotFound)}, + {code: status.ObjectNotFound, errConst: apistatus.ErrObjectNotFound, errVar: new(apistatus.ObjectNotFound)}, + {code: status.ObjectAccessDenied, errConst: apistatus.ErrObjectAccessDenied, errVar: new(apistatus.ObjectAccessDenied)}, + {code: status.OutOfRange, errConst: apistatus.ErrObjectOutOfRange, errVar: new(apistatus.ObjectOutOfRange)}, + {code: status.SessionTokenExpired, errConst: apistatus.ErrSessionTokenExpired, errVar: new(apistatus.SessionTokenExpired)}, + } { + srv := srv + srv.modifyResp = func(r *apiobject.GetRangeHashResponse) { + r.MetaHeader.Status = &status.Status{Code: testCase.code, Message: "any message"} + } + assertErr := func(err error) { + require.ErrorIs(t, err, apistatus.Error, testCase) + require.ErrorContains(t, err, "any message", testCase) + if testCase.errConst != nil { + require.ErrorIs(t, err, testCase.errConst, testCase) + } + if testCase.errVar != nil { + require.ErrorAs(t, err, testCase.errVar, testCase) + } + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.HashObjectPayloadRanges(ctx, srv.cnr, srv.obj, srv.typ, srv.clientSigner, HashObjectPayloadRangesOptions{}, srv.ranges) + assertErr(err) + require.True(t, *handlerCalled, testCase) + } + }) + t.Run("response body", func(t *testing.T) { + t.Run("missing", func(t *testing.T) { + srv := srv + assertErr := func(err error) { require.EqualError(t, err, "invalid response: missing body") } + c, handlerCalled := dial(t, &srv, assertErr) + srv.modifyResp = func(r *apiobject.GetRangeHashResponse) { r.Body = nil } + _, err := c.HashObjectPayloadRanges(ctx, srv.cnr, srv.obj, srv.typ, srv.clientSigner, HashObjectPayloadRangesOptions{}, srv.ranges) + assertErr(err) + 
require.True(t, *handlerCalled) + }) + }) + }) + t.Run("response info handler", func(t *testing.T) { + t.Run("OK", func(t *testing.T) { + srv := srv + srv.epoch = 3598503 + assertErr := func(err error) { require.NoError(t, err) } + respHandlerCalled := false + c, reqHandlerCalled := _dial(t, &srv, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + respHandlerCalled = true + require.EqualValues(t, 3598503, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return nil + }) + }) + _, err := c.HashObjectPayloadRanges(ctx, srv.cnr, srv.obj, srv.typ, srv.clientSigner, HashObjectPayloadRangesOptions{}, srv.ranges) + assertErr(err) + require.True(t, respHandlerCalled) + require.True(t, *reqHandlerCalled) + }) + t.Run("fail", func(t *testing.T) { + srv := srv + srv.epoch = 4386380643 + assertErr := func(err error) { require.ErrorContains(t, err, "intercept response info: some handler error") } + respHandlerCalled := false + c, reqHandlerCalled := _dial(t, &srv, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + if !respHandlerCalled { // dial + respHandlerCalled = true + return nil + } + require.EqualValues(t, 4386380643, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return errors.New("some handler error") + }) + }) + _, err := c.HashObjectPayloadRanges(ctx, srv.cnr, srv.obj, srv.typ, srv.clientSigner, HashObjectPayloadRangesOptions{}, srv.ranges) + assertErr(err) + require.True(t, respHandlerCalled) + require.True(t, *reqHandlerCalled) + }) }) } diff --git a/client/object_put.go b/client/object_put.go index 1b50bdfd7..de4870374 100644 --- a/client/object_put.go +++ b/client/object_put.go @@ -1,328 +1,310 @@ package client -import ( - "context" - "errors" - "fmt" - "io" - - "github.com/nspcc-dev/neofs-api-go/v2/acl" - v2object 
"github.com/nspcc-dev/neofs-api-go/v2/object" - rpcapi "github.com/nspcc-dev/neofs-api-go/v2/rpc" - "github.com/nspcc-dev/neofs-api-go/v2/rpc/client" - "github.com/nspcc-dev/neofs-sdk-go/bearer" - neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" - "github.com/nspcc-dev/neofs-sdk-go/object" - oid "github.com/nspcc-dev/neofs-sdk-go/object/id" - "github.com/nspcc-dev/neofs-sdk-go/stat" - "github.com/nspcc-dev/neofs-sdk-go/user" -) - -var ( - // ErrNoSessionExplicitly is a special error to show auto-session is disabled. - ErrNoSessionExplicitly = errors.New("session was removed explicitly") -) - -var ( - // special variable for test purposes only, to overwrite real RPC calls. - rpcAPIPutObject = func(cli *client.Client, r *v2object.PutResponse, o ...client.CallOption) (objectWriter, error) { - return rpcapi.PutObject(cli, r, o...) - } -) - -type objectWriter interface { - Write(*v2object.PutRequest) error - Close() error -} - -// shortStatisticCallback is a shorter version of [stat.OperationCallback] which is calling from [client.Client]. -// The difference is the client already know some info about itself. Despite it the client doesn't know -// duration and error from writer/reader. -type shortStatisticCallback func(err error) - -// PrmObjectPutInit groups parameters of ObjectPutInit operation. -type PrmObjectPutInit struct { - sessionContainer - - copyNum uint32 -} - -// SetCopiesNumber sets the minimal number of copies (out of the number specified by container placement policy) for -// the object PUT operation to succeed. This means that object operation will return with successful status even before -// container placement policy is completely satisfied. -func (x *PrmObjectPutInit) SetCopiesNumber(copiesNumber uint32) { - x.copyNum = copiesNumber -} - -// ResObjectPut groups the final result values of ObjectPutInit operation. -type ResObjectPut struct { - obj oid.ID -} - -// StoredObjectID returns identifier of the saved object. 
-func (x ResObjectPut) StoredObjectID() oid.ID { - return x.obj -} - -// ObjectWriter is designed to write one object to NeoFS system. -type ObjectWriter interface { - io.WriteCloser - GetResult() ResObjectPut -} - -// DefaultObjectWriter implements [ObjectWriter]. +// var ( +// // ErrNoSessionExplicitly is a special error to show auto-session is disabled. +// ErrNoSessionExplicitly = errors.New("session was removed explicitly") +// ) // -// Must be initialized using [Client.ObjectPutInit], any other usage is unsafe. -type DefaultObjectWriter struct { - cancelCtxStream context.CancelFunc - - client *Client - stream objectWriter - streamClosed bool - - signer neofscrypto.Signer - res ResObjectPut - err error - - chunkCalled bool - - respV2 v2object.PutResponse - req v2object.PutRequest - partInit v2object.PutObjectPartInit - partChunk v2object.PutObjectPartChunk - - statisticCallback shortStatisticCallback - - buf []byte - bufCleanCallback func() -} - -// WithBearerToken attaches bearer token to be used for the operation. -// Should be called once before any writing steps. -func (x *PrmObjectPutInit) WithBearerToken(t bearer.Token) { - var v2token acl.BearerToken - t.WriteToV2(&v2token) - x.meta.SetBearerToken(&v2token) -} - -// MarkLocal tells the server to execute the operation locally. -func (x *PrmObjectPutInit) MarkLocal() { - x.meta.SetTTL(1) -} - -// WithXHeaders specifies list of extended headers (string key-value pairs) -// to be attached to the request. Must have an even length. +// var ( +// // special variable for test purposes only, to overwrite real RPC calls. +// rpcAPIPutObject = func(cli *client.Client, r *v2object.PutResponse, o ...client.CallOption) (objectWriter, error) { +// return rpcapi.PutObject(cli, r, o...) +// } +// ) // -// Slice must not be mutated until the operation completes. -func (x *PrmObjectPutInit) WithXHeaders(hs ...string) { - writeXHeadersToMeta(hs, &x.meta) -} - -// writeHeader writes header of the object. 
Result means success. -// Failure reason can be received via [DefaultObjectWriter.Close]. -func (x *DefaultObjectWriter) writeHeader(hdr object.Object) error { - v2Hdr := hdr.ToV2() - - x.partInit.SetObjectID(v2Hdr.GetObjectID()) - x.partInit.SetHeader(v2Hdr.GetHeader()) - x.partInit.SetSignature(v2Hdr.GetSignature()) - - x.req.GetBody().SetObjectPart(&x.partInit) - x.req.SetVerificationHeader(nil) - - x.err = signServiceMessage(x.signer, &x.req, x.buf) - if x.err != nil { - x.err = fmt.Errorf("sign message: %w", x.err) - return x.err - } - - x.err = x.stream.Write(&x.req) - return x.err -} - -// WritePayloadChunk writes chunk of the object payload. Result means success. -// Failure reason can be received via [DefaultObjectWriter.Close]. -func (x *DefaultObjectWriter) Write(chunk []byte) (n int, err error) { - if !x.chunkCalled { - x.chunkCalled = true - x.req.GetBody().SetObjectPart(&x.partChunk) - } - - var writtenBytes int - - for ln := len(chunk); ln > 0; ln = len(chunk) { - // maxChunkLen restricts maximum byte length of the chunk - // transmitted in a single stream message. It depends on - // server settings and other message fields, but for now - // we simply assume that 3MB is large enough to reduce the - // number of messages, and not to exceed the limit - // (4MB by default for gRPC servers). - const maxChunkLen = 3 << 20 - if ln > maxChunkLen { - ln = maxChunkLen - } - - // we deal with size limit overflow above, but there is another case: - // what if method is called with "small" chunk many times? We write - // a message to the stream on each call. Alternatively, we could use buffering. - // In most cases, the chunk length does not vary between calls. Given this - // assumption, as well as the length of the payload from the header, it is - // possible to buffer the data of intermediate chunks, and send a message when - // the allocated buffer is filled, or when the last chunk is received. 
- // It is mentally assumed that allocating and filling the buffer is better than - // synchronous sending, but this needs to be tested. - x.partChunk.SetChunk(chunk[:ln]) - x.req.SetVerificationHeader(nil) - - x.err = signServiceMessage(x.signer, &x.req, x.buf) - if x.err != nil { - x.err = fmt.Errorf("sign message: %w", x.err) - return writtenBytes, x.err - } - - x.err = x.stream.Write(&x.req) - if x.err != nil { - if errors.Is(x.err, io.EOF) { - _ = x.stream.Close() - x.err = x.client.processResponse(&x.respV2) - x.streamClosed = true - x.cancelCtxStream() - } - - return writtenBytes, x.err - } - - writtenBytes += len(chunk[:ln]) - chunk = chunk[ln:] - } - - return writtenBytes, nil -} - -// Close ends writing the object and returns the result of the operation -// along with the final results. Must be called after using the [DefaultObjectWriter]. -// -// Exactly one return value is non-nil. By default, server status is returned in res structure. -// Any client's internal or transport errors are returned as Go built-in error. -// If Client is tuned to resolve NeoFS API statuses, then NeoFS failures -// codes are returned as error. -// -// Return errors: -// - global (see Client docs) -// - [apistatus.ErrContainerNotFound] -// - [apistatus.ErrObjectAccessDenied] -// - [apistatus.ErrObjectLocked] -// - [apistatus.ErrLockNonRegularObject] -// - [apistatus.ErrSessionTokenNotFound] -// - [apistatus.ErrSessionTokenExpired] -func (x *DefaultObjectWriter) Close() error { - if x.statisticCallback != nil { - defer func() { - x.statisticCallback(x.err) - }() - } - - if x.bufCleanCallback != nil { - defer x.bufCleanCallback() - } - - if x.streamClosed { - return nil - } - - defer x.cancelCtxStream() - - // Ignore io.EOF error, because it is expected error for client-side - // stream termination by the server. E.g. when stream contains invalid - // message. Server returns an error in response message (in status). 
- if x.err != nil && !errors.Is(x.err, io.EOF) { - return x.err - } - - if x.err = x.stream.Close(); x.err != nil { - return x.err - } - - if x.err = x.client.processResponse(&x.respV2); x.err != nil { - return x.err - } - - const fieldID = "ID" - - idV2 := x.respV2.GetBody().GetObjectID() - if idV2 == nil { - x.err = newErrMissingResponseField(fieldID) - return x.err - } - - x.err = x.res.obj.ReadFromV2(*idV2) - if x.err != nil { - x.err = newErrInvalidResponseField(fieldID, x.err) - } - - return x.err -} - -// GetResult returns the put operation result. -func (x *DefaultObjectWriter) GetResult() ResObjectPut { - return x.res -} - -// ObjectPutInit initiates writing an object through a remote server using NeoFS API protocol. -// Header length is limited to [object.MaxHeaderLen]. -// -// The call only opens the transmission channel, explicit recording is done using the [ObjectWriter]. -// Exactly one return value is non-nil. Resulting writer must be finally closed. -// -// Context is required and must not be nil. It will be used for network communication for the whole object transmission, -// including put init (this method) and subsequent object payload writes via ObjectWriter. -// -// Signer is required and must not be nil. The operation is executed on behalf of -// the account corresponding to the specified Signer, which is taken into account, in particular, for access control. 
-// -// Returns errors: -// - [ErrMissingSigner] -func (c *Client) ObjectPutInit(ctx context.Context, hdr object.Object, signer user.Signer, prm PrmObjectPutInit) (ObjectWriter, error) { - var err error - defer func() { - c.sendStatistic(stat.MethodObjectPut, err)() - }() - var w DefaultObjectWriter - w.statisticCallback = func(err error) { - c.sendStatistic(stat.MethodObjectPutStream, err)() - } - - if signer == nil { - return nil, ErrMissingSigner - } - - ctx, cancel := context.WithCancel(ctx) - stream, err := rpcAPIPutObject(&c.c, &w.respV2, client.WithContext(ctx)) - if err != nil { - cancel() - err = fmt.Errorf("open stream: %w", err) - return nil, err - } - - buf := c.buffers.Get().(*[]byte) - w.buf = *buf - w.bufCleanCallback = func() { - c.buffers.Put(buf) - } - - w.signer = signer - w.cancelCtxStream = cancel - w.client = c - w.stream = stream - w.partInit.SetCopiesNumber(prm.copyNum) - w.req.SetBody(new(v2object.PutRequestBody)) - c.prepareRequest(&w.req, &prm.meta) - - if err = w.writeHeader(hdr); err != nil { - _ = w.Close() - err = fmt.Errorf("header write: %w", err) - return nil, err - } - - return &w, nil -} +// type objectWriter interface { +// Write(*v2object.PutRequest) error +// Close() error +// } +// +// // shortStatisticCallback is a shorter version of [stat.OperationCallback] which is calling from [client.Client]. +// // The difference is the client already know some info about itself. Despite it the client doesn't know +// // duration and error from writer/reader. +// type shortStatisticCallback func(err error) +// +// // PrmObjectPutInit groups parameters of ObjectPutInit operation. +// type PrmObjectPutInit struct { +// sessionContainer +// +// copyNum uint32 +// } +// +// // SetCopiesNumber sets the minimal number of copies (out of the number specified by container placement policy) for +// // the object PUT operation to succeed. 
This means that object operation will return with successful status even before +// // container placement policy is completely satisfied. +// func (x *PrmObjectPutInit) SetCopiesNumber(copiesNumber uint32) { +// x.copyNum = copiesNumber +// } +// +// // ResObjectPut groups the final result values of ObjectPutInit operation. +// type ResObjectPut struct { +// obj oid.ID +// } +// +// // StoredObjectID returns identifier of the saved object. +// func (x ResObjectPut) StoredObjectID() oid.ID { +// return x.obj +// } +// +// // ObjectWriter is designed to write one object to NeoFS system. +// type ObjectWriter interface { +// io.WriteCloser +// GetResult() ResObjectPut +// } +// +// // DefaultObjectWriter implements [ObjectWriter]. +// // +// // Must be initialized using [Client.ObjectPutInit], any other usage is unsafe. +// type DefaultObjectWriter struct { +// cancelCtxStream context.CancelFunc +// +// client *Client +// stream objectWriter +// streamClosed bool +// +// signer neofscrypto.Signer +// res ResObjectPut +// err error +// +// chunkCalled bool +// +// respV2 v2object.PutResponse +// req v2object.PutRequest +// partInit v2object.PutObjectPartInit +// partChunk v2object.PutObjectPartChunk +// +// statisticCallback shortStatisticCallback +// +// buf []byte +// bufCleanCallback func() +// } +// +// // WithBearerToken attaches bearer token to be used for the operation. +// // Should be called once before any writing steps. +// func (x *PrmObjectPutInit) WithBearerToken(t bearer.Token) { +// var v2token acl.BearerToken +// t.WriteToV2(&v2token) +// x.meta.SetBearerToken(&v2token) +// } +// +// // MarkLocal tells the server to execute the operation locally. +// func (x *PrmObjectPutInit) MarkLocal() { +// x.meta.SetTTL(1) +// } +// +// // WithXHeaders specifies list of extended headers (string key-value pairs) +// // to be attached to the request. Must have an even length. +// // +// // Slice must not be mutated until the operation completes. 
+// func (x *PrmObjectPutInit) WithXHeaders(hs ...string) { +// writeXHeadersToMeta(hs, &x.meta) +// } +// +// // writeHeader writes header of the object. Result means success. +// // Failure reason can be received via [DefaultObjectWriter.Close]. +// func (x *DefaultObjectWriter) writeHeader(hdr object.Object) error { +// v2Hdr := hdr.ToV2() +// +// x.partInit.SetObjectID(v2Hdr.GetObjectID()) +// x.partInit.SetHeader(v2Hdr.GetHeader()) +// x.partInit.SetSignature(v2Hdr.GetSignature()) +// +// x.req.GetBody().SetObjectPart(&x.partInit) +// x.req.SetVerificationHeader(nil) +// +// x.err = signServiceMessage(x.signer, &x.req, x.buf) +// if x.err != nil { +// x.err = fmt.Errorf("sign message: %w", x.err) +// return x.err +// } +// +// x.err = x.stream.Write(&x.req) +// return x.err +// } +// +// // WritePayloadChunk writes chunk of the object payload. Result means success. +// // Failure reason can be received via [DefaultObjectWriter.Close]. +// func (x *DefaultObjectWriter) Write(chunk []byte) (n int, err error) { +// if !x.chunkCalled { +// x.chunkCalled = true +// x.req.GetBody().SetObjectPart(&x.partChunk) +// } +// +// var writtenBytes int +// +// for ln := len(chunk); ln > 0; ln = len(chunk) { +// // maxChunkLen restricts maximum byte length of the chunk +// // transmitted in a single stream message. It depends on +// // server settings and other message fields, but for now +// // we simply assume that 3MB is large enough to reduce the +// // number of messages, and not to exceed the limit +// // (4MB by default for gRPC servers). +// const maxChunkLen = 3 << 20 +// if ln > maxChunkLen { +// ln = maxChunkLen +// } +// +// // we deal with size limit overflow above, but there is another case: +// // what if method is called with "small" chunk many times? We write +// // a message to the stream on each call. Alternatively, we could use buffering. +// // In most cases, the chunk length does not vary between calls. 
Given this +// // assumption, as well as the length of the payload from the header, it is +// // possible to buffer the data of intermediate chunks, and send a message when +// // the allocated buffer is filled, or when the last chunk is received. +// // It is mentally assumed that allocating and filling the buffer is better than +// // synchronous sending, but this needs to be tested. +// x.partChunk.SetChunk(chunk[:ln]) +// x.req.SetVerificationHeader(nil) +// +// x.err = signServiceMessage(x.signer, &x.req, x.buf) +// if x.err != nil { +// x.err = fmt.Errorf("sign message: %w", x.err) +// return writtenBytes, x.err +// } +// +// x.err = x.stream.Write(&x.req) +// if x.err != nil { +// if errors.Is(x.err, io.EOF) { +// _ = x.stream.Close() +// x.err = x.client.processResponse(&x.respV2) +// x.streamClosed = true +// x.cancelCtxStream() +// } +// +// return writtenBytes, x.err +// } +// +// writtenBytes += len(chunk[:ln]) +// chunk = chunk[ln:] +// } +// +// return writtenBytes, nil +// } +// +// // Close ends writing the object and returns the result of the operation +// // along with the final results. Must be called after using the [DefaultObjectWriter]. +// // +// // Exactly one return value is non-nil. By default, server status is returned in res structure. +// // Any client's internal or transport errors are returned as Go built-in error. +// // If Client is tuned to resolve NeoFS API statuses, then NeoFS failures +// // codes are returned as error. 
+// // +// // Return errors: +// // - global (see Client docs) +// // - [apistatus.ErrContainerNotFound] +// // - [apistatus.ErrObjectAccessDenied] +// // - [apistatus.ErrObjectLocked] +// // - [apistatus.ErrLockNonRegularObject] +// // - [apistatus.ErrSessionTokenNotFound] +// // - [apistatus.ErrSessionTokenExpired] +// func (x *DefaultObjectWriter) Close() error { +// if x.statisticCallback != nil { +// defer func() { +// x.statisticCallback(x.err) +// }() +// } +// +// if x.bufCleanCallback != nil { +// defer x.bufCleanCallback() +// } +// +// if x.streamClosed { +// return nil +// } +// +// defer x.cancelCtxStream() +// +// // Ignore io.EOF error, because it is expected error for client-side +// // stream termination by the server. E.g. when stream contains invalid +// // message. Server returns an error in response message (in status). +// if x.err != nil && !errors.Is(x.err, io.EOF) { +// return x.err +// } +// +// if x.err = x.stream.Close(); x.err != nil { +// return x.err +// } +// +// if x.err = x.client.processResponse(&x.respV2); x.err != nil { +// return x.err +// } +// +// const fieldID = "ID" +// +// idV2 := x.respV2.GetBody().GetObjectID() +// if idV2 == nil { +// x.err = newErrMissingResponseField(fieldID) +// return x.err +// } +// +// x.err = x.res.obj.ReadFromV2(*idV2) +// if x.err != nil { +// x.err = newErrInvalidResponseField(fieldID, x.err) +// } +// +// return x.err +// } +// +// // GetResult returns the put operation result. +// func (x *DefaultObjectWriter) GetResult() ResObjectPut { +// return x.res +// } +// +// // ObjectPutInit initiates writing an object through a remote server using NeoFS API protocol. +// // Header length is limited to [object.MaxHeaderLen]. +// // +// // The call only opens the transmission channel, explicit recording is done using the [ObjectWriter]. +// // Exactly one return value is non-nil. Resulting writer must be finally closed. +// // +// // Context is required and must not be nil. 
It will be used for network communication for the whole object transmission, +// // including put init (this method) and subsequent object payload writes via ObjectWriter. +// // +// // Signer is required and must not be nil. The operation is executed on behalf of +// // the account corresponding to the specified Signer, which is taken into account, in particular, for access control. +// // +// // Returns errors: +// // - [ErrMissingSigner] +// func (c *Client) ObjectPutInit(ctx context.Context, hdr object.Object, signer user.Signer, prm PrmObjectPutInit) (ObjectWriter, error) { +// var err error +// defer func() { +// c.sendStatistic(stat.MethodObjectPut, err)() +// }() +// var w DefaultObjectWriter +// w.statisticCallback = func(err error) { +// c.sendStatistic(stat.MethodObjectPutStream, err)() +// } +// +// if signer == nil { +// return nil, ErrMissingSigner +// } +// +// ctx, cancel := context.WithCancel(ctx) +// stream, err := rpcAPIPutObject(&c.c, &w.respV2, client.WithContext(ctx)) +// if err != nil { +// cancel() +// err = fmt.Errorf("open stream: %w", err) +// return nil, err +// } +// +// buf := c.buffers.Get().(*[]byte) +// w.buf = *buf +// w.bufCleanCallback = func() { +// c.buffers.Put(buf) +// } +// +// w.signer = signer +// w.cancelCtxStream = cancel +// w.client = c +// w.stream = stream +// w.partInit.SetCopiesNumber(prm.copyNum) +// w.req.SetBody(new(v2object.PutRequestBody)) +// c.prepareRequest(&w.req, &prm.meta) +// +// if err = w.writeHeader(hdr); err != nil { +// _ = w.Close() +// err = fmt.Errorf("header write: %w", err) +// return nil, err +// } +// +// return &w, nil +// } diff --git a/client/object_put_test.go b/client/object_put_test.go index 75b3a2aac..eb60eb484 100644 --- a/client/object_put_test.go +++ b/client/object_put_test.go @@ -1,72 +1,54 @@ package client -import ( - "context" - "errors" - "io" - "testing" - - v2object "github.com/nspcc-dev/neofs-api-go/v2/object" - "github.com/nspcc-dev/neofs-api-go/v2/refs" - 
"github.com/nspcc-dev/neofs-api-go/v2/rpc/client" - v2session "github.com/nspcc-dev/neofs-api-go/v2/session" - apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" - "github.com/nspcc-dev/neofs-sdk-go/crypto/test" - "github.com/nspcc-dev/neofs-sdk-go/object" - "github.com/nspcc-dev/neofs-sdk-go/user" - "github.com/nspcc-dev/neofs-sdk-go/version" - "github.com/stretchr/testify/require" -) - -type testPutStreamAccessDenied struct { - resp *v2object.PutResponse - signer user.Signer - t *testing.T -} - -func (t *testPutStreamAccessDenied) Write(req *v2object.PutRequest) error { - switch req.GetBody().GetObjectPart().(type) { - case *v2object.PutObjectPartInit: - return nil - case *v2object.PutObjectPartChunk: - return io.EOF - default: - return errors.New("excuse me?") - } -} - -func (t *testPutStreamAccessDenied) Close() error { - m := new(v2session.ResponseMetaHeader) - - var v refs.Version - version.Current().WriteToV2(&v) - - m.SetVersion(&v) - m.SetStatus(apistatus.ErrObjectAccessDenied.ErrorToV2()) - - t.resp.SetMetaHeader(m) - require.NoError(t.t, signServiceMessage(t.signer, t.resp, nil)) - - return nil -} - -func TestClient_ObjectPutInit(t *testing.T) { - t.Run("EOF-on-status-return", func(t *testing.T) { - c := newClient(t, nil) - signer := test.RandomSignerRFC6979(t) - - rpcAPIPutObject = func(_ *client.Client, r *v2object.PutResponse, _ ...client.CallOption) (objectWriter, error) { - return &testPutStreamAccessDenied{resp: r, signer: signer, t: t}, nil - } - - w, err := c.ObjectPutInit(context.Background(), object.Object{}, signer, PrmObjectPutInit{}) - require.NoError(t, err) - - n, err := w.Write([]byte{1}) - require.Zero(t, n) - require.ErrorIs(t, err, new(apistatus.ObjectAccessDenied)) - - err = w.Close() - require.NoError(t, err) - }) -} +// type testPutStreamAccessDenied struct { +// resp *v2object.PutResponse +// signer user.Signer +// t *testing.T +// } +// +// func (t *testPutStreamAccessDenied) Write(req *v2object.PutRequest) error { +// 
switch req.GetBody().GetObjectPart().(type) { +// case *v2object.PutObjectPartInit: +// return nil +// case *v2object.PutObjectPartChunk: +// return io.EOF +// default: +// return errors.New("excuse me?") +// } +// } +// +// func (t *testPutStreamAccessDenied) Close() error { +// m := new(v2session.ResponseMetaHeader) +// +// var v refs.Version +// version.Current().WriteToV2(&v) +// +// m.SetVersion(&v) +// m.SetStatus(apistatus.ErrObjectAccessDenied.ErrorToV2()) +// +// t.resp.SetMetaHeader(m) +// require.NoError(t.t, signServiceMessage(t.signer, t.resp, nil)) +// +// return nil +// } +// +// func TestClient_ObjectPutInit(t *testing.T) { +// t.Run("EOF-on-status-return", func(t *testing.T) { +// c := newClient(t, nil) +// signer := test.RandomSignerRFC6979(t) +// +// rpcAPIPutObject = func(_ *client.Client, r *v2object.PutResponse, _ ...client.CallOption) (objectWriter, error) { +// return &testPutStreamAccessDenied{resp: r, signer: signer, t: t}, nil +// } +// +// w, err := c.ObjectPutInit(context.Background(), object.Object{}, signer, PrmObjectPutInit{}) +// require.NoError(t, err) +// +// n, err := w.Write([]byte{1}) +// require.Zero(t, n) +// require.ErrorIs(t, err, new(apistatus.ObjectAccessDenied)) +// +// err = w.Close() +// require.NoError(t, err) +// }) +// } diff --git a/client/object_replicate.go b/client/object_replicate.go index 5b37f27df..37d52b784 100644 --- a/client/object_replicate.go +++ b/client/object_replicate.go @@ -1,231 +1,211 @@ package client -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "os" - "sync" - - objectgrpc "github.com/nspcc-dev/neofs-api-go/v2/object/grpc" - "github.com/nspcc-dev/neofs-api-go/v2/rpc/client" - "github.com/nspcc-dev/neofs-api-go/v2/rpc/common" - "github.com/nspcc-dev/neofs-api-go/v2/rpc/grpc" - "github.com/nspcc-dev/neofs-api-go/v2/rpc/message" - "github.com/nspcc-dev/neofs-api-go/v2/status" - apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" - neofscrypto 
"github.com/nspcc-dev/neofs-sdk-go/crypto" - oid "github.com/nspcc-dev/neofs-sdk-go/object/id" - "google.golang.org/protobuf/encoding/protowire" -) - -// ReplicateObject copies binary-encoded NeoFS object from the given -// [io.ReadSeeker] to remote server for local storage. The signer must -// authenticate a storage node that matches the object's storage policy. Since -// this property can change over NeoFS system time, compliance with the policy -// is checked back to foreseeable moment in the past. The server must be a -// storage node compliant with the current object's storage policy. ID must be -// the same as in src. -// -// ReplicateObject is intended for maintaining data storage by NeoFS system -// nodes only, not for regular use. -// -// Object must be encoded in compliance with Protocol Buffers v3 format in -// ascending order of fields. -// -// Source [io.ReadSeeker] must point to the start. Note that ReplicateObject -// does not reset src to start after the call. If it is needed, do not forget to -// Seek. -// -// See also [DemuxReplicatedObject]. -// -// Return errors: -// - [apistatus.ErrServerInternal]: internal server error described in the text message; -// - [apistatus.ErrObjectAccessDenied]: the signer does not authenticate any -// NeoFS storage node matching storage policy of the container referenced by the -// replicated object; -// - [apistatus.ErrContainerNotFound]: the container to which the replicated -// object is associated was not found. 
-func (c *Client) ReplicateObject(ctx context.Context, id oid.ID, src io.ReadSeeker, signer neofscrypto.Signer) error { - const svcName = "neo.fs.v2.object.ObjectService" - const opName = "Replicate" - stream, err := c.c.Init(common.CallMethodInfoUnary(svcName, opName), - client.WithContext(ctx), client.AllowBinarySendingOnly()) - if err != nil { - return fmt.Errorf("init service=%s/op=%s RPC: %w", svcName, opName, err) - } - - msg, err := prepareReplicateMessage(id, src, signer) - if err != nil { - return err - } - - err = stream.WriteMessage(client.BinaryMessage(msg)) - if err != nil && !errors.Is(err, io.EOF) { // io.EOF means the server closed the stream on its side - return fmt.Errorf("send request: %w", err) - } - - var resp replicateResponse - err = stream.ReadMessage(&resp) - if err != nil { - if errors.Is(err, io.EOF) { - err = io.ErrUnexpectedEOF - } - - return fmt.Errorf("recv response: %w", err) - } - - _ = stream.Close() - - return resp.err -} - -// DemuxReplicatedObject allows to share same argument between multiple -// [Client.ReplicateObject] calls for deduplication of network messages. This -// option should be used with caution and only to achieve traffic demux -// optimization goals. 
-func DemuxReplicatedObject(src io.ReadSeeker) io.ReadSeeker { - return &demuxReplicationMessage{ - rs: src, - } -} - -type demuxReplicationMessage struct { - rs io.ReadSeeker - - mtx sync.Mutex - msg []byte - err error -} - -func (x *demuxReplicationMessage) Read(p []byte) (n int, err error) { - return x.rs.Read(p) -} - -func (x *demuxReplicationMessage) Seek(offset int64, whence int) (int64, error) { - return x.rs.Seek(offset, whence) -} - -func prepareReplicateMessage(id oid.ID, src io.ReadSeeker, signer neofscrypto.Signer) ([]byte, error) { - srm, ok := src.(*demuxReplicationMessage) - if !ok { - return newReplicateMessage(id, src, signer) - } - - srm.mtx.Lock() - defer srm.mtx.Unlock() - - if srm.msg == nil && srm.err == nil { - srm.msg, srm.err = newReplicateMessage(id, src, signer) - } - - return srm.msg, srm.err -} - -func newReplicateMessage(id oid.ID, src io.ReadSeeker, signer neofscrypto.Signer) ([]byte, error) { - var objSize uint64 - switch v := src.(type) { - default: - n, err := src.Seek(0, io.SeekEnd) - if err != nil { - return nil, fmt.Errorf("seek to end: %w", err) - } else if n < 0 { - return nil, fmt.Errorf("seek to end returned negative value %d", objSize) - } - - _, err = src.Seek(-n, io.SeekCurrent) - if err != nil { - return nil, fmt.Errorf("seek back to initial pos: %w", err) - } - - objSize = uint64(n) - case *os.File: - fileInfo, err := v.Stat() - if err != nil { - return nil, fmt.Errorf("get file info: %w", err) - } - - objSize = uint64(fileInfo.Size()) - case *bytes.Reader: - n := v.Size() - if n < 0 { - return nil, fmt.Errorf("negative byte buffer size return %d", objSize) - } - - objSize = uint64(n) - } - - // TODO: limit the objSize? 
- - idSig, err := signer.Sign(id[:]) - if err != nil { - return nil, fmt.Errorf("sign object ID: %w", err) - } - - bPubKey := neofscrypto.PublicKeyBytes(signer.Public()) - sigScheme := uint64(signer.Scheme()) - - const fieldNumObject = 1 - const fieldNumSignature = 2 - - sigSize := protowire.SizeTag(fieldNumSigPubKey) + protowire.SizeBytes(len(bPubKey)) + - protowire.SizeTag(fieldNumSigVal) + protowire.SizeBytes(len(idSig)) + - protowire.SizeTag(fieldNumSigScheme) + protowire.SizeVarint(sigScheme) - - msgSize := protowire.SizeTag(fieldNumObject) + protowire.SizeVarint(objSize) + - protowire.SizeTag(fieldNumSignature) + protowire.SizeBytes(sigSize) - - // TODO(#544): support external buffers - msg := make([]byte, 0, uint64(msgSize)+objSize) - - msg = protowire.AppendTag(msg, fieldNumObject, protowire.BytesType) - msg = protowire.AppendVarint(msg, objSize) - msg = msg[:uint64(len(msg))+objSize] - - bufObj := msg[uint64(len(msg))-objSize:] - _, err = io.ReadFull(src, bufObj) - if err != nil { - return nil, fmt.Errorf("read full object into the buffer: %w", err) - } - - msg = protowire.AppendTag(msg, fieldNumSignature, protowire.BytesType) - msg = protowire.AppendVarint(msg, uint64(sigSize)) - msg = protowire.AppendTag(msg, fieldNumSigPubKey, protowire.BytesType) - msg = protowire.AppendBytes(msg, bPubKey) - msg = protowire.AppendTag(msg, fieldNumSigVal, protowire.BytesType) - msg = protowire.AppendBytes(msg, idSig) - msg = protowire.AppendTag(msg, fieldNumSigScheme, protowire.VarintType) - msg = protowire.AppendVarint(msg, sigScheme) - - return msg, nil -} - -type replicateResponse struct { - err error -} - -func (x replicateResponse) ToGRPCMessage() grpc.Message { - return new(objectgrpc.ReplicateResponse) -} - -func (x *replicateResponse) FromGRPCMessage(gm grpc.Message) error { - m, ok := gm.(*objectgrpc.ReplicateResponse) - if !ok { - return message.NewUnexpectedMessageType(gm, m) - } - - var st *status.Status - if mst := m.GetStatus(); mst != nil { - st = 
new(status.Status) - err := st.FromGRPCMessage(mst) - if err != nil { - return fmt.Errorf("decode response status: %w", err) - } - } - - x.err = apistatus.ErrorFromV2(st) - - return nil -} +// +// // ReplicateObject copies binary-encoded NeoFS object from the given +// // [io.ReadSeeker] to remote server for local storage. The signer must +// // authenticate a storage node that matches the object's storage policy. Since +// // this property can change over NeoFS system time, compliance with the policy +// // is checked back to foreseeable moment in the past. The server must be a +// // storage node compliant with the current object's storage policy. ID must be +// // the same as in src. +// // +// // ReplicateObject is intended for maintaining data storage by NeoFS system +// // nodes only, not for regular use. +// // +// // Object must be encoded in compliance with Protocol Buffers v3 format in +// // ascending order of fields. +// // +// // Source [io.ReadSeeker] must point to the start. Note that ReplicateObject +// // does not reset src to start after the call. If it is needed, do not forget to +// // Seek. +// // +// // See also [DemuxReplicatedObject]. +// // +// // Return errors: +// // - [apistatus.ErrServerInternal]: internal server error described in the text message; +// // - [apistatus.ErrObjectAccessDenied]: the signer does not authenticate any +// // NeoFS storage node matching storage policy of the container referenced by the +// // replicated object; +// // - [apistatus.ErrContainerNotFound]: the container to which the replicated +// // object is associated was not found. 
+// func (c *Client) ReplicateObject(ctx context.Context, id oid.ID, src io.ReadSeeker, signer neofscrypto.Signer) error { +// const svcName = "neo.fs.v2.object.ObjectService" +// const opName = "Replicate" +// stream, err := c.c.Init(common.CallMethodInfoUnary(svcName, opName), +// client.WithContext(ctx), client.AllowBinarySendingOnly()) +// if err != nil { +// return fmt.Errorf("init service=%s/op=%s RPC: %w", svcName, opName, err) +// } +// +// msg, err := prepareReplicateMessage(id, src, signer) +// if err != nil { +// return err +// } +// +// err = stream.WriteMessage(client.BinaryMessage(msg)) +// if err != nil && !errors.Is(err, io.EOF) { // io.EOF means the server closed the stream on its side +// return fmt.Errorf("send request: %w", err) +// } +// +// var resp replicateResponse +// err = stream.ReadMessage(&resp) +// if err != nil { +// if errors.Is(err, io.EOF) { +// err = io.ErrUnexpectedEOF +// } +// +// return fmt.Errorf("recv response: %w", err) +// } +// +// _ = stream.Close() +// +// return resp.err +// } +// +// // DemuxReplicatedObject allows to share same argument between multiple +// // [Client.ReplicateObject] calls for deduplication of network messages. This +// // option should be used with caution and only to achieve traffic demux +// // optimization goals. 
+// func DemuxReplicatedObject(src io.ReadSeeker) io.ReadSeeker { +// return &demuxReplicationMessage{ +// rs: src, +// } +// } +// +// type demuxReplicationMessage struct { +// rs io.ReadSeeker +// +// mtx sync.Mutex +// msg []byte +// err error +// } +// +// func (x *demuxReplicationMessage) Read(p []byte) (n int, err error) { +// return x.rs.Read(p) +// } +// +// func (x *demuxReplicationMessage) Seek(offset int64, whence int) (int64, error) { +// return x.rs.Seek(offset, whence) +// } +// +// func prepareReplicateMessage(id oid.ID, src io.ReadSeeker, signer neofscrypto.Signer) ([]byte, error) { +// srm, ok := src.(*demuxReplicationMessage) +// if !ok { +// return newReplicateMessage(id, src, signer) +// } +// +// srm.mtx.Lock() +// defer srm.mtx.Unlock() +// +// if srm.msg == nil && srm.err == nil { +// srm.msg, srm.err = newReplicateMessage(id, src, signer) +// } +// +// return srm.msg, srm.err +// } +// +// func newReplicateMessage(id oid.ID, src io.ReadSeeker, signer neofscrypto.Signer) ([]byte, error) { +// var objSize uint64 +// switch v := src.(type) { +// default: +// n, err := src.Seek(0, io.SeekEnd) +// if err != nil { +// return nil, fmt.Errorf("seek to end: %w", err) +// } else if n < 0 { +// return nil, fmt.Errorf("seek to end returned negative value %d", objSize) +// } +// +// _, err = src.Seek(-n, io.SeekCurrent) +// if err != nil { +// return nil, fmt.Errorf("seek back to initial pos: %w", err) +// } +// +// objSize = uint64(n) +// case *os.File: +// fileInfo, err := v.Stat() +// if err != nil { +// return nil, fmt.Errorf("get file info: %w", err) +// } +// +// objSize = uint64(fileInfo.Size()) +// case *bytes.Reader: +// n := v.Size() +// if n < 0 { +// return nil, fmt.Errorf("negative byte buffer size return %d", objSize) +// } +// +// objSize = uint64(n) +// } +// +// // TODO: limit the objSize? 
+// +// idSig, err := signer.Sign(id[:]) +// if err != nil { +// return nil, fmt.Errorf("sign object ID: %w", err) +// } +// +// bPubKey := neofscrypto.PublicKeyBytes(signer.Public()) +// sigScheme := uint64(signer.Scheme()) +// +// const fieldNumObject = 1 +// const fieldNumSignature = 2 +// +// sigSize := protowire.SizeTag(fieldNumSigPubKey) + protowire.SizeBytes(len(bPubKey)) + +// protowire.SizeTag(fieldNumSigVal) + protowire.SizeBytes(len(idSig)) + +// protowire.SizeTag(fieldNumSigScheme) + protowire.SizeVarint(sigScheme) +// +// msgSize := protowire.SizeTag(fieldNumObject) + protowire.SizeVarint(objSize) + +// protowire.SizeTag(fieldNumSignature) + protowire.SizeBytes(sigSize) +// +// // TODO(#544): support external buffers +// msg := make([]byte, 0, uint64(msgSize)+objSize) +// +// msg = protowire.AppendTag(msg, fieldNumObject, protowire.BytesType) +// msg = protowire.AppendVarint(msg, objSize) +// msg = msg[:uint64(len(msg))+objSize] +// +// bufObj := msg[uint64(len(msg))-objSize:] +// _, err = io.ReadFull(src, bufObj) +// if err != nil { +// return nil, fmt.Errorf("read full object into the buffer: %w", err) +// } +// +// msg = protowire.AppendTag(msg, fieldNumSignature, protowire.BytesType) +// msg = protowire.AppendVarint(msg, uint64(sigSize)) +// msg = protowire.AppendTag(msg, fieldNumSigPubKey, protowire.BytesType) +// msg = protowire.AppendBytes(msg, bPubKey) +// msg = protowire.AppendTag(msg, fieldNumSigVal, protowire.BytesType) +// msg = protowire.AppendBytes(msg, idSig) +// msg = protowire.AppendTag(msg, fieldNumSigScheme, protowire.VarintType) +// msg = protowire.AppendVarint(msg, sigScheme) +// +// return msg, nil +// } +// +// type replicateResponse struct { +// err error +// } +// +// func (x replicateResponse) ToGRPCMessage() grpc.Message { +// return new(objectgrpc.ReplicateResponse) +// } +// +// func (x *replicateResponse) FromGRPCMessage(gm grpc.Message) error { +// m, ok := gm.(*objectgrpc.ReplicateResponse) +// if !ok { +// return 
message.NewUnexpectedMessageType(gm, m) +// } +// +// var st *status.Status +// if mst := m.GetStatus(); mst != nil { +// st = new(status.Status) +// err := st.FromGRPCMessage(mst) +// if err != nil { +// return fmt.Errorf("decode response status: %w", err) +// } +// } +// +// x.err = apistatus.ErrorFromV2(st) +// +// return nil +// } diff --git a/client/object_replicate_test.go b/client/object_replicate_test.go index af363448e..d97a9a9f2 100644 --- a/client/object_replicate_test.go +++ b/client/object_replicate_test.go @@ -1,222 +1,198 @@ package client -import ( - "bytes" - "context" - "crypto/rand" - "fmt" - "net" - "sync" - "testing" - - objectgrpc "github.com/nspcc-dev/neofs-api-go/v2/object/grpc" - "github.com/nspcc-dev/neofs-api-go/v2/rpc/client" - status "github.com/nspcc-dev/neofs-api-go/v2/status/grpc" - apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" - neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" - "github.com/nspcc-dev/neofs-sdk-go/crypto/test" - "github.com/nspcc-dev/neofs-sdk-go/object" - oidtest "github.com/nspcc-dev/neofs-sdk-go/object/id/test" - objecttest "github.com/nspcc-dev/neofs-sdk-go/object/test" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/test/bufconn" - "google.golang.org/protobuf/proto" -) - -func BenchmarkPrepareReplicationMessage(b *testing.B) { - bObj := make([]byte, 1<<10) - _, err := rand.Read(bObj) // structure does not matter for - require.NoError(b, err) - id := oidtest.ID() - - var signer nopSigner - - b.ReportAllocs() - b.ResetTimer() - - for i := 0; i < b.N; i++ { - _, err = prepareReplicateMessage(id, bytes.NewReader(bObj), signer) - require.NoError(b, err) - } -} - -type testReplicationServer struct { - objectgrpc.UnimplementedObjectServiceServer - - clientSigner neofscrypto.Signer - clientObj object.Object - - respStatusCode uint32 -} - -func (x *testReplicationServer) Replicate(_ context.Context, req 
*objectgrpc.ReplicateRequest) (*objectgrpc.ReplicateResponse, error) { - var resp objectgrpc.ReplicateResponse - var st status.Status - - objMsg := req.GetObject() - if objMsg == nil { - st.Code = 1024 // internal error - st.Message = "missing object field" - resp.Status = &st - return &resp, nil - } - - sigMsg := req.GetSignature() - if objMsg == nil { - st.Code = 1024 // internal error - st.Message = "missing signature field" - resp.Status = &st - return &resp, nil - } - - var obj object.Object - - bObj, _ := proto.Marshal(objMsg) - err := obj.Unmarshal(bObj) - if err != nil { - st.Code = 1024 // internal error - st.Message = fmt.Sprintf("decode binary object: %v", err) - resp.Status = &st - return &resp, nil - } - - bObjSent, _ := x.clientObj.Marshal() - bObjRecv, _ := obj.Marshal() - - if !bytes.Equal(bObjSent, bObjRecv) { - st.Code = 1024 // internal error - st.Message = "received object differs from the sent one" - resp.Status = &st - return &resp, nil - } - - if !bytes.Equal(sigMsg.GetKey(), neofscrypto.PublicKeyBytes(x.clientSigner.Public())) { - st.Code = 1024 // internal error - st.Message = "public key in the received signature differs with the client's one" - resp.Status = &st - return &resp, nil - } - - if int32(sigMsg.GetScheme()) != int32(x.clientSigner.Scheme()) { - st.Code = 1024 // internal error - st.Message = "signature scheme in the received signature differs with the client's one" - resp.Status = &st - return &resp, nil - } - - id, ok := obj.ID() - if !ok { - st.Code = 1024 // internal error - st.Message = "missing object ID" - resp.Status = &st - return &resp, nil - } - - if !x.clientSigner.Public().Verify(id[:], sigMsg.GetSign()) { - st.Code = 1024 // internal error - st.Message = "signature verification failed" - resp.Status = &st - return &resp, nil - } - - resp.Status = &status.Status{Code: x.respStatusCode} - return &resp, nil -} - -func serveObjectReplication(tb testing.TB, clientSigner neofscrypto.Signer, clientObj object.Object) 
(*testReplicationServer, *Client) { - lis := bufconn.Listen(1 << 10) - - var replicationSrv testReplicationServer - - gSrv := grpc.NewServer() - objectgrpc.RegisterObjectServiceServer(gSrv, &replicationSrv) - - gConn, err := grpc.Dial("", grpc.WithContextDialer(func(_ context.Context, _ string) (net.Conn, error) { - return lis.Dial() - }), grpc.WithTransportCredentials(insecure.NewCredentials())) - require.NoError(tb, err) - - tb.Cleanup(gSrv.Stop) - - go func() { _ = gSrv.Serve(lis) }() - - replicationSrv.clientObj = clientObj - replicationSrv.clientSigner = clientSigner - - return &replicationSrv, &Client{ - c: *client.New(client.WithGRPCConn(gConn)), - } -} - -func TestClient_ReplicateObject(t *testing.T) { - ctx := context.Background() - signer := test.RandomSigner(t) - obj := objecttest.Object(t) - id := oidtest.ID() - obj.SetID(id) - bObj, _ := obj.Marshal() - - t.Run("OK", func(t *testing.T) { - srv, cli := serveObjectReplication(t, signer, obj) - srv.respStatusCode = 0 - - err := cli.ReplicateObject(ctx, id, bytes.NewReader(bObj), signer) - require.NoError(t, err) - }) - - t.Run("invalid binary object", func(t *testing.T) { - bObj := []byte("Hello, world!") // definitely incorrect binary object - _, cli := serveObjectReplication(t, signer, obj) - - err := cli.ReplicateObject(ctx, id, bytes.NewReader(bObj), signer) - require.Error(t, err) - }) - - t.Run("statuses", func(t *testing.T) { - for _, tc := range []struct { - code uint32 - expErr error - desc string - }{ - {code: 1024, expErr: apistatus.ErrServerInternal, desc: "internal server error"}, - {code: 2048, expErr: apistatus.ErrObjectAccessDenied, desc: "forbidden"}, - {code: 3072, expErr: apistatus.ErrContainerNotFound, desc: "container not found"}, - } { - srv, cli := serveObjectReplication(t, signer, obj) - srv.respStatusCode = tc.code - - err := cli.ReplicateObject(ctx, id, bytes.NewReader(bObj), signer) - require.ErrorIs(t, err, tc.expErr, tc.desc) - } - }) - - t.Run("demux", func(t *testing.T) { - 
demuxObj := DemuxReplicatedObject(bytes.NewReader(bObj)) - _, cli := serveObjectReplication(t, signer, obj) - - err := cli.ReplicateObject(ctx, id, demuxObj, signer) - require.NoError(t, err) - - msgCp := bytes.Clone(demuxObj.(*demuxReplicationMessage).msg) - initBufPtr := &demuxObj.(*demuxReplicationMessage).msg[0] - - var wg sync.WaitGroup - for i := 0; i < 5; i++ { - wg.Add(1) - go func() { - defer wg.Done() - - err := cli.ReplicateObject(ctx, id, demuxObj, signer) - fmt.Println(err) - require.NoError(t, err) - }() - } - - wg.Wait() - - require.Equal(t, msgCp, demuxObj.(*demuxReplicationMessage).msg) - require.Equal(t, initBufPtr, &demuxObj.(*demuxReplicationMessage).msg[0]) - }) -} +// +// func BenchmarkPrepareReplicationMessage(b *testing.B) { +// bObj := make([]byte, 1<<10) +// _, err := rand.Read(bObj) // structure does not matter for +// require.NoError(b, err) +// id := oidtest.ID() +// +// var signer nopSigner +// +// b.ReportAllocs() +// b.ResetTimer() +// +// for i := 0; i < b.N; i++ { +// _, err = prepareReplicateMessage(id, bytes.NewReader(bObj), signer) +// require.NoError(b, err) +// } +// } +// +// type testReplicationServer struct { +// objectgrpc.UnimplementedObjectServiceServer +// +// clientSigner neofscrypto.Signer +// clientObj object.Object +// +// respStatusCode uint32 +// } +// +// func (x *testReplicationServer) Replicate(_ context.Context, req *objectgrpc.ReplicateRequest) (*objectgrpc.ReplicateResponse, error) { +// var resp objectgrpc.ReplicateResponse +// var st status.Status +// +// objMsg := req.GetObject() +// if objMsg == nil { +// st.Code = 1024 // internal error +// st.Message = "missing object field" +// resp.Status = &st +// return &resp, nil +// } +// +// sigMsg := req.GetSignature() +// if objMsg == nil { +// st.Code = 1024 // internal error +// st.Message = "missing signature field" +// resp.Status = &st +// return &resp, nil +// } +// +// var obj object.Object +// +// bObj, _ := proto.Marshal(objMsg) +// err := 
obj.Unmarshal(bObj) +// if err != nil { +// st.Code = 1024 // internal error +// st.Message = fmt.Sprintf("decode binary object: %v", err) +// resp.Status = &st +// return &resp, nil +// } +// +// bObjSent, _ := x.clientObj.Marshal() +// bObjRecv, _ := obj.Marshal() +// +// if !bytes.Equal(bObjSent, bObjRecv) { +// st.Code = 1024 // internal error +// st.Message = "received object differs from the sent one" +// resp.Status = &st +// return &resp, nil +// } +// +// if !bytes.Equal(sigMsg.GetKey(), neofscrypto.PublicKeyBytes(x.clientSigner.Public())) { +// st.Code = 1024 // internal error +// st.Message = "public key in the received signature differs with the client's one" +// resp.Status = &st +// return &resp, nil +// } +// +// if int32(sigMsg.GetScheme()) != int32(x.clientSigner.Scheme()) { +// st.Code = 1024 // internal error +// st.Message = "signature scheme in the received signature differs with the client's one" +// resp.Status = &st +// return &resp, nil +// } +// +// id, ok := obj.ID() +// if !ok { +// st.Code = 1024 // internal error +// st.Message = "missing object ID" +// resp.Status = &st +// return &resp, nil +// } +// +// if !x.clientSigner.Public().Verify(id[:], sigMsg.GetSign()) { +// st.Code = 1024 // internal error +// st.Message = "signature verification failed" +// resp.Status = &st +// return &resp, nil +// } +// +// resp.Status = &status.Status{Code: x.respStatusCode} +// return &resp, nil +// } +// +// func serveObjectReplication(tb testing.TB, clientSigner neofscrypto.Signer, clientObj object.Object) (*testReplicationServer, *Client) { +// lis := bufconn.Listen(1 << 10) +// +// var replicationSrv testReplicationServer +// +// gSrv := grpc.NewServer() +// objectgrpc.RegisterObjectServiceServer(gSrv, &replicationSrv) +// +// gConn, err := grpc.Dial("", grpc.WithContextDialer(func(_ context.Context, _ string) (net.Conn, error) { +// return lis.Dial() +// }), grpc.WithTransportCredentials(insecure.NewCredentials())) +// require.NoError(tb, err) 
+// +// tb.Cleanup(gSrv.Stop) +// +// go func() { _ = gSrv.Serve(lis) }() +// +// replicationSrv.clientObj = clientObj +// replicationSrv.clientSigner = clientSigner +// +// return &replicationSrv, &Client{ +// c: *client.New(client.WithGRPCConn(gConn)), +// } +// } +// +// func TestClient_ReplicateObject(t *testing.T) { +// ctx := context.Background() +// signer := test.RandomSigner(t) +// obj := objecttest.Object(t) +// id := oidtest.ID() +// obj.SetID(id) +// bObj, _ := obj.Marshal() +// +// t.Run("OK", func(t *testing.T) { +// srv, cli := serveObjectReplication(t, signer, obj) +// srv.respStatusCode = 0 +// +// err := cli.ReplicateObject(ctx, id, bytes.NewReader(bObj), signer) +// require.NoError(t, err) +// }) +// +// t.Run("invalid binary object", func(t *testing.T) { +// bObj := []byte("Hello, world!") // definitely incorrect binary object +// _, cli := serveObjectReplication(t, signer, obj) +// +// err := cli.ReplicateObject(ctx, id, bytes.NewReader(bObj), signer) +// require.Error(t, err) +// }) +// +// t.Run("statuses", func(t *testing.T) { +// for _, tc := range []struct { +// code uint32 +// expErr error +// desc string +// }{ +// {code: 1024, expErr: apistatus.ErrServerInternal, desc: "internal server error"}, +// {code: 2048, expErr: apistatus.ErrObjectAccessDenied, desc: "forbidden"}, +// {code: 3072, expErr: apistatus.ErrContainerNotFound, desc: "container not found"}, +// } { +// srv, cli := serveObjectReplication(t, signer, obj) +// srv.respStatusCode = tc.code +// +// err := cli.ReplicateObject(ctx, id, bytes.NewReader(bObj), signer) +// require.ErrorIs(t, err, tc.expErr, tc.desc) +// } +// }) +// +// t.Run("demux", func(t *testing.T) { +// demuxObj := DemuxReplicatedObject(bytes.NewReader(bObj)) +// _, cli := serveObjectReplication(t, signer, obj) +// +// err := cli.ReplicateObject(ctx, id, demuxObj, signer) +// require.NoError(t, err) +// +// msgCp := bytes.Clone(demuxObj.(*demuxReplicationMessage).msg) +// initBufPtr := 
&demuxObj.(*demuxReplicationMessage).msg[0] +// +// var wg sync.WaitGroup +// for i := 0; i < 5; i++ { +// wg.Add(1) +// go func() { +// defer wg.Done() +// +// err := cli.ReplicateObject(ctx, id, demuxObj, signer) +// fmt.Println(err) +// require.NoError(t, err) +// }() +// } +// +// wg.Wait() +// +// require.Equal(t, msgCp, demuxObj.(*demuxReplicationMessage).msg) +// require.Equal(t, initBufPtr, &demuxObj.(*demuxReplicationMessage).msg[0]) +// }) +// } diff --git a/client/object_search.go b/client/object_search.go index a3641bc64..1658c7a42 100644 --- a/client/object_search.go +++ b/client/object_search.go @@ -5,238 +5,240 @@ import ( "errors" "fmt" "io" + "time" - "github.com/nspcc-dev/neofs-api-go/v2/acl" - v2object "github.com/nspcc-dev/neofs-api-go/v2/object" - v2refs "github.com/nspcc-dev/neofs-api-go/v2/refs" - rpcapi "github.com/nspcc-dev/neofs-api-go/v2/rpc" - "github.com/nspcc-dev/neofs-api-go/v2/rpc/client" + apiacl "github.com/nspcc-dev/neofs-sdk-go/api/acl" + apiobject "github.com/nspcc-dev/neofs-sdk-go/api/object" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + apisession "github.com/nspcc-dev/neofs-sdk-go/api/session" "github.com/nspcc-dev/neofs-sdk-go/bearer" + apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" cid "github.com/nspcc-dev/neofs-sdk-go/container/id" + neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" "github.com/nspcc-dev/neofs-sdk-go/object" oid "github.com/nspcc-dev/neofs-sdk-go/object/id" + "github.com/nspcc-dev/neofs-sdk-go/session" "github.com/nspcc-dev/neofs-sdk-go/stat" - "github.com/nspcc-dev/neofs-sdk-go/user" ) -var ( - // special variable for test purposes only, to overwrite real RPC calls. - rpcAPISearchObjects = rpcapi.SearchObjects -) +const fieldObjectIDList = "object ID list" + +// SelectObjectsOptions groups optional parameters of [Client.SelectObjects]. +type SelectObjectsOptions struct { + local bool -// PrmObjectSearch groups optional parameters of ObjectSearch operation. 
-type PrmObjectSearch struct { - sessionContainer + sessionSet bool + session session.Object - filters object.SearchFilters + bearerTokenSet bool + bearerToken bearer.Token } -// MarkLocal tells the server to execute the operation locally. -func (x *PrmObjectSearch) MarkLocal() { - x.meta.SetTTL(1) +// PreventForwarding disables request forwarding to container nodes and +// instructs the server to select objects from the local storage only. +func (x *SelectObjectsOptions) PreventForwarding() { + x.local = true } -// WithBearerToken attaches bearer token to be used for the operation. +// WithinSession specifies token of the session preliminary issued by some user +// with the client signer. Session must include [session.VerbObjectSearch] +// action. The token must be signed and target the subject authenticated by +// signer passed to [Client.SelectObjects]. If set, the session issuer will be +// treated as the original request sender. // -// If set, underlying eACL rules will be used in access control. +// Note that sessions affect access control only indirectly: they just replace +// request originator. // -// Must be signed. -func (x *PrmObjectSearch) WithBearerToken(t bearer.Token) { - var v2token acl.BearerToken - t.WriteToV2(&v2token) - x.meta.SetBearerToken(&v2token) -} - -// WithXHeaders specifies list of extended headers (string key-value pairs) -// to be attached to the request. Must have an even length. +// With session, [Client.SelectObjects] can also return +// [apistatus.ErrSessionTokenExpired] if the token has expired: this usually +// requires re-issuing the session. // -// Slice must not be mutated until the operation completes. -func (x *PrmObjectSearch) WithXHeaders(hs ...string) { - writeXHeadersToMeta(hs, &x.meta) +// Note that it makes no sense to start session with the server via +// [Client.StartSession] like for [Client.DeleteObject] or [Client.PutObject]. 
+func (x *SelectObjectsOptions) WithinSession(s session.Object) { + x.session, x.sessionSet = s, true } -// SetFilters sets filters by which to select objects. All container objects -// match unset/empty filters. -func (x *PrmObjectSearch) SetFilters(filters object.SearchFilters) { - x.filters = filters +// WithBearerToken attaches bearer token carrying extended ACL rules that +// replace eACL of the container. The token must be issued by the container +// owner and target the subject authenticated by signer passed to +// [Client.SelectObjects]. In practice, bearer token makes sense only if it +// grants selecting rights to the subject. +func (x *SelectObjectsOptions) WithBearerToken(t bearer.Token) { + x.bearerToken, x.bearerTokenSet = t, true } -// ObjectListReader is designed to read list of object identifiers from NeoFS system. -// -// Must be initialized using Client.ObjectSearch, any other usage is unsafe. -type ObjectListReader struct { - client *Client - cancelCtxStream context.CancelFunc - err error - stream interface { - Read(resp *v2object.SearchResponse) error - } - tail []v2refs.ObjectID +// AllObjectsQuery returns search query to select all objects in particular +// container. +func AllObjectsQuery() []object.SearchFilter { return nil } - statisticCallback shortStatisticCallback -} +var errBreak = errors.New("break") -// Read reads another list of the object identifiers. Works similar to -// io.Reader.Read but copies oid.ID. -// -// Failure reason can be received via Close. -// -// Panics if buf has zero length. -func (x *ObjectListReader) Read(buf []oid.ID) (int, error) { - if len(buf) == 0 { - panic("empty buffer in ObjectListReader.ReadList") +// allows to share code b/w various methods calling object search. ID list +// passed to f is always non-empty. If f returns errBreak, this method breaks +// with no error. 
+func (c *Client) forEachSelectedObjectsSet(ctx context.Context, cnr cid.ID, signer neofscrypto.Signer, opts SelectObjectsOptions, + filters []object.SearchFilter, f func(nResp int, ids []*refs.ObjectID) error) (err error) { + if signer == nil { + return errMissingSigner } - read := copyIDBuffers(buf, x.tail) - x.tail = x.tail[read:] - - if len(buf) == read { - return read, nil + if c.handleAPIOpResult != nil { + defer func(start time.Time) { + c.handleAPIOpResult(c.serverPubKey, c.endpoint, stat.MethodObjectSearch, time.Since(start), err) + }(time.Now()) } - for { - var resp v2object.SearchResponse - x.err = x.stream.Read(&resp) - if x.err != nil { - return read, x.err - } - - x.err = x.client.processResponse(&resp) - if x.err != nil { - return read, x.err - } - - // read new chunk of objects - ids := resp.GetBody().GetIDList() - if len(ids) == 0 { - // just skip empty lists since they are not prohibited by protocol - continue - } - - ln := copyIDBuffers(buf[read:], ids) - read += ln - - if read == len(buf) { - // save the tail - x.tail = append(x.tail, ids[ln:]...) 
-
-			return read, nil
+	// form request
+	req := &apiobject.SearchRequest{
+		Body: &apiobject.SearchRequest_Body{
+			ContainerId: new(refs.ContainerID),
+		},
+		MetaHeader: new(apisession.RequestMetaHeader),
+	}
+	cnr.WriteToV2(req.Body.ContainerId)
+	if len(filters) > 0 {
+		req.Body.Filters = make([]*apiobject.SearchRequest_Body_Filter, len(filters))
+		for i := range filters {
+			req.Body.Filters[i] = new(apiobject.SearchRequest_Body_Filter)
+			filters[i].WriteToV2(req.Body.Filters[i])
 		}
 	}
-}
-
-func copyIDBuffers(dst []oid.ID, src []v2refs.ObjectID) int {
-	var i int
-	for ; i < len(dst) && i < len(src); i++ {
-		_ = dst[i].ReadFromV2(src[i])
+	if opts.sessionSet {
+		req.MetaHeader.SessionToken = new(apisession.SessionToken)
+		opts.session.WriteToV2(req.MetaHeader.SessionToken)
+	}
+	if opts.bearerTokenSet {
+		req.MetaHeader.BearerToken = new(apiacl.BearerToken)
+		opts.bearerToken.WriteToV2(req.MetaHeader.BearerToken)
+	}
+	if opts.local {
+		req.MetaHeader.Ttl = 1
+	} else {
+		req.MetaHeader.Ttl = 2
+	}
+	// FIXME: balance requests need small fixed-size buffers for encoding, it makes
+	// no sense to mix them with other buffers
+	buf := c.signBuffers.Get().(*[]byte)
+	defer c.signBuffers.Put(buf)
+	if req.VerifyHeader, err = neofscrypto.SignRequest(signer, req, req.Body, *buf); err != nil {
+		return fmt.Errorf("%s: %w", errSignRequest, err)
 	}
-	return i
-}

-// Iterate iterates over the list of found object identifiers.
-// f can return true to stop iteration earlier.
-//
-// Returns an error if object can't be read.
-func (x *ObjectListReader) Iterate(f func(oid.ID) bool) error { - buf := make([]oid.ID, 1) + // send request + ctx, cancelStream := context.WithCancel(ctx) + defer cancelStream() + stream, err := c.transport.object.Search(ctx, req) + if err != nil { + return fmt.Errorf("%s: %w", errTransport, err) + } - for { - _, err := x.Read(buf) + // read the stream + var resp apiobject.SearchResponse + var lastStatus apistatus.StatusV2 + mustFin := false + for n := 0; ; n++ { + err = stream.RecvMsg(&resp) + if err != nil { + if errors.Is(err, io.EOF) { + if n > 0 { // at least 1 message carrying status is required + return lastStatus + } + return errors.New("stream ended without a status response") + } + return fmt.Errorf("%s while reading response #%d: %w", errTransport, n, err) + } else if mustFin { + return fmt.Errorf("stream is not completed after the message #%d which must be the last one", n-1) + } + // intercept response info + if c.interceptAPIRespInfo != nil && n == 0 { + if err = c.interceptAPIRespInfo(ResponseMetaInfo{ + key: resp.GetVerifyHeader().GetBodySignature().GetKey(), + epoch: resp.GetMetaHeader().GetEpoch(), + }); err != nil { + return fmt.Errorf("%s: %w", errInterceptResponseInfo, err) + } + } + if err = neofscrypto.VerifyResponse(&resp, resp.Body); err != nil { + return fmt.Errorf("invalid response #%d: %s: %w", n, errResponseSignature, err) + } + lastStatus, err = apistatus.ErrorFromV2(resp.GetMetaHeader().GetStatus()) if err != nil { - return x.Close() + return fmt.Errorf("invalid response #%d: %s: %w", n, errInvalidResponseStatus, err) + } else if lastStatus != nil { + mustFin = true + continue } - if f(buf[0]) { - return nil + if resp.Body == nil || len(resp.Body.IdList) == 0 { + if n == 0 { + mustFin = true + continue + } + // technically, we can continue. But if the server is malicious/buggy, it may + // return zillion of such messages, and the only thing that could save us is the + // context. So, it's safer to fail immediately. 
+			return fmt.Errorf("invalid response #%d: empty %s is only allowed in the first stream message", n, fieldObjectIDList)
+		}
+		if err = f(n, resp.Body.IdList); err != nil {
+			if errors.Is(err, errBreak) {
+				return nil
+			}
+			return err
+		}
 	}
 }

-// Close ends reading list of the matched objects and returns the result of the operation
-// along with the final results. Must be called after using the ObjectListReader.
+// SelectObjects selects objects from the referenced container that match all
+// specified search filters and returns their IDs. In particular, the empty set
+// of filters matches all container objects ([AllObjectsQuery] may be used for
+// this to make code clearer). If no matching objects are found, SelectObjects
+// returns an empty result without an error. SelectObjects returns buffered
+// objects regardless of error so that the caller can process the partial result
+// if needed.
 //
-// Any client's internal or transport errors are returned as Go built-in error.
-// If Client is tuned to resolve NeoFS API statuses, then NeoFS failures
-// codes are returned as error.
+// SelectObjects returns:
+//   - [apistatus.ErrContainerNotFound] if referenced container is missing
+//   - [apistatus.ErrObjectAccessDenied] if signer has no access to select objects
 //
-// Return errors:
-//   - global (see Client docs)
-//   - [apistatus.ErrContainerNotFound]
-//   - [apistatus.ErrObjectAccessDenied]
-//   - [apistatus.ErrSessionTokenExpired]
-func (x *ObjectListReader) Close() error {
-	var err error
-	if x.statisticCallback != nil {
-		defer func() {
-			x.statisticCallback(err)
-		}()
-	}
-
-	defer x.cancelCtxStream()
-
-	if x.err != nil && !errors.Is(x.err, io.EOF) {
-		err = x.err
-		return err
-	}
-
-	return nil
+// The method places the identifiers of all selected objects in a memory buffer,
+// which can be quite large for some containers/queries. If full buffering is
+// not required, [Client.ForEachSelectedObject] may be used to increase resource
+// efficiency.
+func (c *Client) SelectObjects(ctx context.Context, cnr cid.ID, signer neofscrypto.Signer, opts SelectObjectsOptions, filters []object.SearchFilter) ([]oid.ID, error) {
+	var res []oid.ID
+	return res, c.forEachSelectedObjectsSet(ctx, cnr, signer, opts, filters, func(nResp int, ids []*refs.ObjectID) error {
+		off := len(res)
+		res = append(res, make([]oid.ID, len(ids))...)
+		for i := range ids {
+			if ids[i] == nil {
+				return fmt.Errorf("invalid response #%d: invalid body: invalid field (%s): nil element #%d", nResp, fieldObjectIDList, i)
+			} else if err := res[off+i].ReadFromV2(ids[i]); err != nil {
+				res = res[:off+i]
+				return fmt.Errorf("invalid response #%d: invalid body: invalid field (%s): invalid element #%d: %w", nResp, fieldObjectIDList, i, err)
+			}
+		}
+		return nil
+	})
 }

-// ObjectSearchInit initiates object selection through a remote server using NeoFS API protocol.
-//
-// The call only opens the transmission channel, explicit fetching of matched objects
-// is done using the ObjectListReader. Exactly one return value is non-nil.
-// Resulting reader must be finally closed.
-//
-// Context is required and must not be nil. It is used for network communication.
-//
-// Signer is required and must not be nil. The operation is executed on behalf of the account corresponding to
-// the specified Signer, which is taken into account, in particular, for access control.
-//
-// Return errors:
-//   - [ErrMissingSigner]
-func (c *Client) ObjectSearchInit(ctx context.Context, containerID cid.ID, signer user.Signer, prm PrmObjectSearch) (*ObjectListReader, error) {
-	var err error
-	defer func() {
-		c.sendStatistic(stat.MethodObjectSearch, err)()
-	}()
-
-	if signer == nil {
-		return nil, ErrMissingSigner
-	}
-
-	var cidV2 v2refs.ContainerID
-	containerID.WriteToV2(&cidV2)
-
-	var body v2object.SearchRequestBody
-	body.SetVersion(1)
-	body.SetContainerID(&cidV2)
-	body.SetFilters(prm.filters.ToV2())
-
-	// init reader
-	var req v2object.SearchRequest
-	req.SetBody(&body)
-	c.prepareRequest(&req, &prm.meta)
-
-	buf := c.buffers.Get().(*[]byte)
-	err = signServiceMessage(signer, &req, *buf)
-	c.buffers.Put(buf)
-	if err != nil {
-		err = fmt.Errorf("sign request: %w", err)
-		return nil, err
-	}
-
-	var r ObjectListReader
-	ctx, r.cancelCtxStream = context.WithCancel(ctx)
-
-	r.stream, err = rpcAPISearchObjects(&c.c, &req, client.WithContext(ctx))
-	if err != nil {
-		err = fmt.Errorf("open stream: %w", err)
-		return nil, err
-	}
-	r.client = c
-	r.statisticCallback = func(err error) {
-		c.sendStatistic(stat.MethodObjectSearchStream, err)()
-	}
-
-	return &r, nil
+// ForEachSelectedObject works like [Client.SelectObjects] but passes each
+// selected object's ID to f. If f returns false, ForEachSelectedObject breaks
+// without an error. ForEachSelectedObject, like [Client.SelectObjects], returns
+// no error if no matching objects are found (this case can be detected by the
+// caller via f closure).
+func (c *Client) ForEachSelectedObject(ctx context.Context, cnr cid.ID, signer neofscrypto.Signer, opts SelectObjectsOptions, + filters []object.SearchFilter, f func(oid.ID) bool) error { + return c.forEachSelectedObjectsSet(ctx, cnr, signer, opts, filters, func(nResp int, ids []*refs.ObjectID) error { + var id oid.ID + for i := range ids { + if ids[i] == nil { + return fmt.Errorf("invalid respone #%d: invalid body: invalid field (%s): nil element #%d", nResp, fieldObjectIDList, i) + } else if err := id.ReadFromV2(ids[i]); err != nil { + return fmt.Errorf("invalid response #%d: invalid body: invalid field (%s): invalid element #%d: %w", nResp, fieldObjectIDList, i, err) + } else if !f(id) { + return errBreak + } + } + return nil + }) } diff --git a/client/object_search_test.go b/client/object_search_test.go index 339af643d..97c94a4db 100644 --- a/client/object_search_test.go +++ b/client/object_search_test.go @@ -1,195 +1,723 @@ package client import ( + "bytes" "context" "errors" - "io" + "fmt" + "math/rand" + "net" + "strconv" "testing" - - v2object "github.com/nspcc-dev/neofs-api-go/v2/object" - "github.com/nspcc-dev/neofs-api-go/v2/refs" + "time" + + apinetmap "github.com/nspcc-dev/neofs-sdk-go/api/netmap" + apiobject "github.com/nspcc-dev/neofs-sdk-go/api/object" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + apisession "github.com/nspcc-dev/neofs-sdk-go/api/session" + "github.com/nspcc-dev/neofs-sdk-go/api/status" + "github.com/nspcc-dev/neofs-sdk-go/bearer" + bearertest "github.com/nspcc-dev/neofs-sdk-go/bearer/test" + apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" cid "github.com/nspcc-dev/neofs-sdk-go/container/id" + cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" - "github.com/nspcc-dev/neofs-sdk-go/crypto/test" + neofscryptotest "github.com/nspcc-dev/neofs-sdk-go/crypto/test" + netmaptest "github.com/nspcc-dev/neofs-sdk-go/netmap/test" + 
"github.com/nspcc-dev/neofs-sdk-go/object" oid "github.com/nspcc-dev/neofs-sdk-go/object/id" oidtest "github.com/nspcc-dev/neofs-sdk-go/object/id/test" + "github.com/nspcc-dev/neofs-sdk-go/session" + sessiontest "github.com/nspcc-dev/neofs-sdk-go/session/test" + "github.com/nspcc-dev/neofs-sdk-go/stat" + usertest "github.com/nspcc-dev/neofs-sdk-go/user/test" + versiontest "github.com/nspcc-dev/neofs-sdk-go/version/test" "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/test/bufconn" ) -func TestObjectSearch(t *testing.T) { - ids := make([]oid.ID, 20) - for i := range ids { - ids[i] = oidtest.ID() - } +func TestAllObjectsQuery(t *testing.T) { + require.Empty(t, AllObjectsQuery()) +} - p, resp := testListReaderResponse(t) +type searchObjectsServer struct { + noOtherObjectCalls + // client + clientSigner neofscrypto.Signer + cnr cid.ID + filters []object.SearchFilter + local bool + session *session.Object + bearerToken *bearer.Token + // server + sleepDur time.Duration + endpointInfoOnDialServer + emptyStream bool + idLists [][]oid.ID + // allows to modify n-th response in the stream and replace it with returned + // transport error + modifyResp func(n int, r *apiobject.SearchResponse) error + // allows to corrupt signature of the n-th response in the stream + corruptRespSig func(n int, r *apiobject.SearchResponse) +} - buf := make([]oid.ID, 2) - checkRead := func(t *testing.T, expected []oid.ID, expectedErr error) { - n, err := resp.Read(buf) - if expectedErr == nil { - require.NoError(t, err) - require.True(t, len(expected) == len(buf), "expected the same length") - } else { - require.Error(t, err) - require.True(t, len(expected) != len(buf), "expected different length") +func (x searchObjectsServer) sendResponse(stream apiobject.ObjectService_SearchServer, n int, resp *apiobject.SearchResponse) error { + var err error + if x.modifyResp != nil { + if err = x.modifyResp(n, resp); err != nil { + return err } - - 
require.Equal(t, len(expected), n, "expected %d items to be read", len(expected)) - require.Equal(t, expected, buf[:len(expected)]) } - - // nil panic - require.Panics(t, func() { - _, _ = resp.Read(nil) - }) - - // no data - resp.stream = newSearchStream(p, io.EOF, []oid.ID{}) - checkRead(t, []oid.ID{}, io.EOF) - - // both ID fetched - resp.stream = newSearchStream(p, nil, ids[:3]) - checkRead(t, ids[:2], nil) - - // one ID cached, second fetched - resp.stream = newSearchStream(p, nil, ids[3:6]) - checkRead(t, ids[2:4], nil) - - // both ID cached - resp.stream = nil // shouldn't be called, panic if so - checkRead(t, ids[4:6], nil) - - // both ID fetched in 2 requests, with empty one in the middle - resp.stream = newSearchStream(p, nil, ids[6:7], nil, ids[7:8]) - checkRead(t, ids[6:8], nil) - - // read from tail multiple times - resp.stream = newSearchStream(p, nil, ids[8:11]) - buf = buf[:1] - checkRead(t, ids[8:9], nil) - checkRead(t, ids[9:10], nil) - checkRead(t, ids[10:11], nil) - - // handle EOF - buf = buf[:2] - resp.stream = newSearchStream(p, io.EOF, ids[11:12]) - checkRead(t, ids[11:12], io.EOF) + resp.VerifyHeader, err = neofscrypto.SignResponse(x.serverSigner, resp, resp.Body, nil) + if err != nil { + return fmt.Errorf("sign response: %w", err) + } + if x.corruptRespSig != nil { + x.corruptRespSig(n, resp) + } + return stream.Send(resp) } -func TestObjectIterate(t *testing.T) { - ids := make([]oid.ID, 3) - for i := range ids { - ids[i] = oidtest.ID() +func (x searchObjectsServer) Search(req *apiobject.SearchRequest, stream apiobject.ObjectService_SearchServer) error { + if x.sleepDur > 0 { + time.Sleep(x.sleepDur) } + if x.emptyStream { + return nil + } + var sts status.Status + var err error + var cnr cid.ID + sigScheme := refs.SignatureScheme(x.clientSigner.Scheme()) + creatorPubKey := neofscrypto.PublicKeyBytes(x.clientSigner.Public()) + if req == nil { + sts.Code, sts.Message = status.InternalServerError, "nil request" + } else if err = 
neofscrypto.VerifyRequest(req, req.Body); err != nil { + sts.Code, sts.Message = status.SignatureVerificationFail, err.Error() + } else if req.VerifyHeader.BodySignature.Scheme != sigScheme || + !bytes.Equal(req.VerifyHeader.BodySignature.Key, creatorPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request body signature credentials" + } else if req.VerifyHeader.MetaSignature.Scheme != sigScheme || + !bytes.Equal(req.VerifyHeader.MetaSignature.Key, creatorPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request meta header signature credentials" + } else if req.VerifyHeader.OriginSignature.Scheme != sigScheme || + !bytes.Equal(req.VerifyHeader.OriginSignature.Key, creatorPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected origin request verification header signature credentials" + } else if req.MetaHeader == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: missing meta header" + } else if x.local && req.MetaHeader.Ttl != 1 || !x.local && req.MetaHeader.Ttl != 2 { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request: invalid meta header: invalid TTL %d", req.MetaHeader.Ttl) + } else if req.Body == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: missing body" + } else if req.Body.Version > 0 { + sts.Code, sts.Message = status.InternalServerError, "invalid request: invalid body: query version set" + } else if req.Body.ContainerId == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: invalid body: missing address" + } else if err = cnr.ReadFromV2(req.Body.ContainerId); err != nil { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request: invalid body: invalid container: %s", err) + } else if cnr != x.cnr { + sts.Code, sts.Message = status.InternalServerError, "[test] wrong container" + } else if len(req.Body.Filters) != 
len(x.filters) { + sts.Code, sts.Message = status.InternalServerError, "[test] wrong number of filters" + } else { + var sf object.SearchFilter + for i := range req.Body.Filters { + if req.Body.Filters[i] == nil { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request: invalid body: nil filter #%d", i) + } else if err = sf.ReadFromV2(req.Body.Filters[i]); err != nil { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request: invalid body: invalid filter #%d: %v", i, err) + } else if object.FilterOp(req.Body.Filters[i].MatchType) != x.filters[i].Operation() { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("[test] wrong filter #%d op", i) + } else if req.Body.Filters[i].Key != x.filters[i].Key() { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("[test] wrong filter #%d key", i) + } else if req.Body.Filters[i].Value != x.filters[i].Value() { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("[test] wrong filter #%d value", i) + } + } + } + if sts.Code == 0 && x.session != nil { + var so session.Object + if req.MetaHeader.SessionToken == nil { + sts.Code, sts.Message = status.InternalServerError, "[test] missing session token" + } else if err = so.ReadFromV2(req.MetaHeader.SessionToken); err != nil { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request: invalid meta header: invalid session token: %v", err) + } else if !bytes.Equal(so.Marshal(), x.session.Marshal()) { + sts.Code, sts.Message = status.InternalServerError, "[test] session token in request differs with the input one" + } + } + if sts.Code == 0 && x.bearerToken != nil { + var bt bearer.Token + if req.MetaHeader.BearerToken == nil { + sts.Code, sts.Message = status.InternalServerError, "[test] missing bearer token" + } else if err = bt.ReadFromV2(req.MetaHeader.BearerToken); err != nil { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request: 
invalid meta header: invalid bearer token: %v", err) + } else if !bytes.Equal(bt.Marshal(), x.bearerToken.Marshal()) { + sts.Code, sts.Message = status.InternalServerError, "[test] bearer token in request differs with the input one" + } + } + metaHdr := &apisession.ResponseMetaHeader{Status: &sts, Epoch: x.epoch} + if sts.Code != 0 { + return x.sendResponse(stream, 0, &apiobject.SearchResponse{MetaHeader: metaHdr}) + } + if len(x.idLists) == 0 { + x.idLists = [][]oid.ID{{}} // to return empty list + } + for i := range x.idLists { + resp := apiobject.SearchResponse{ + Body: &apiobject.SearchResponse_Body{ + IdList: make([]*refs.ObjectID, len(x.idLists[i])), + }, + MetaHeader: metaHdr, + } + for j := range x.idLists[i] { + resp.Body.IdList[j] = new(refs.ObjectID) + x.idLists[i][j].WriteToV2(resp.Body.IdList[j]) + } + if err = x.sendResponse(stream, i, &resp); err != nil { + return err + } + } + return nil +} - t.Run("no objects", func(t *testing.T) { - p, resp := testListReaderResponse(t) - - resp.stream = newSearchStream(p, io.EOF, []oid.ID{}) - - var actual []oid.ID - require.NoError(t, resp.Iterate(func(id oid.ID) bool { - actual = append(actual, id) - return false - })) - require.Len(t, actual, 0) +func bindClientServerForObjectSearchWithOpts(t testing.TB, assertErr func(error), customizeOpts func(*Options)) (*searchObjectsServer, *Client, *bool) { + var srv searchObjectsServer + srv.sleepDur = 10 * time.Millisecond + srv.serverSigner = neofscryptotest.RandomSigner() + srv.latestVersion = versiontest.Version() + srv.nodeInfo = netmaptest.NodeInfo() + srv.nodeInfo.SetPublicKey(neofscrypto.PublicKeyBytes(srv.serverSigner.Public())) + usr, _ := usertest.TwoUsers() + srv.clientSigner = usr + srv.filters = make([]object.SearchFilter, 5) + for i := range srv.filters { + si := strconv.Itoa(i) + srv.filters[i] = object.NewSearchFilter("k"+si, object.FilterOp(i), "v"+si) + } + ids := oidtest.NIDs(10) + srv.idLists = [][]oid.ID{ids[:2], ids[2:6], ids[6:9], ids[9:]} + + var 
opts Options + var handlerCalled bool + opts.SetAPIRequestResultHandler(func(nodeKey []byte, endpoint string, op stat.Method, dur time.Duration, err error) { + handlerCalled = true + require.Equal(t, srv.nodeInfo.PublicKey(), nodeKey) + require.Equal(t, "localhost:8080", endpoint) + require.Equal(t, stat.MethodObjectSearch, op) + require.Greater(t, dur, srv.sleepDur) + assertErr(err) }) - t.Run("iterate all sequence", func(t *testing.T) { - p, resp := testListReaderResponse(t) + if customizeOpts != nil { + customizeOpts(&opts) + } - resp.stream = newSearchStream(p, io.EOF, ids[0:2], nil, ids[2:3]) + c, err := New(anyValidURI, opts) + require.NoError(t, err) - var actual []oid.ID - require.NoError(t, resp.Iterate(func(id oid.ID) bool { - actual = append(actual, id) - return false - })) - require.Equal(t, ids[:3], actual) - }) - t.Run("stop by return value", func(t *testing.T) { - p, resp := testListReaderResponse(t) - - var actual []oid.ID - resp.stream = &singleStreamResponder{signer: p, idList: [][]oid.ID{ids}} - require.NoError(t, resp.Iterate(func(id oid.ID) bool { - actual = append(actual, id) - return len(actual) == 2 - })) - require.Equal(t, ids[:2], actual) - }) - t.Run("stop after error", func(t *testing.T) { - p, resp := testListReaderResponse(t) - expectedErr := errors.New("test error") + conn := bufconn.Listen(10 << 10) + gs := grpc.NewServer() + apinetmap.RegisterNetmapServiceServer(gs, &srv) + apiobject.RegisterObjectServiceServer(gs, &srv) + go func() { _ = gs.Serve(conn) }() + t.Cleanup(gs.Stop) - resp.stream = newSearchStream(p, expectedErr, ids[:2]) + c.dial = func(context.Context, string) (net.Conn, error) { return conn.Dial() } + require.NoError(t, c.Dial(context.Background())) - var actual []oid.ID - err := resp.Iterate(func(id oid.ID) bool { - actual = append(actual, id) - return false - }) - require.True(t, errors.Is(err, expectedErr), "got: %v", err) - require.Equal(t, ids[:2], actual) - }) + return &srv, c, &handlerCalled } -func 
TestClient_ObjectSearch(t *testing.T) { - c := newClient(t, nil) - - t.Run("missing signer", func(t *testing.T) { - _, err := c.ObjectSearchInit(context.Background(), cid.ID{}, nil, PrmObjectSearch{}) - require.ErrorIs(t, err, ErrMissingSigner) - }) +func bindClientServerForObjectSearch(t testing.TB, assertErr func(error)) (*searchObjectsServer, *Client, *bool) { + return bindClientServerForObjectSearchWithOpts(t, assertErr, nil) } -func testListReaderResponse(t *testing.T) (neofscrypto.Signer, *ObjectListReader) { - return test.RandomSigner(t), &ObjectListReader{ - cancelCtxStream: func() {}, - client: &Client{}, - tail: nil, +func testObjectSearchingMethod(t *testing.T, call func(context.Context, *Client, cid.ID, neofscrypto.Signer, SelectObjectsOptions, []object.SearchFilter) ([]oid.ID, error)) { + ctx := context.Background() + firstNIDLists := func(all [][]oid.ID, n int) []oid.ID { + var res []oid.ID + for i := 0; i < n; i++ { + res = append(res, all[i]...) + } + return res } -} - -func newSearchStream(signer neofscrypto.Signer, endError error, idList ...[]oid.ID) *singleStreamResponder { - return &singleStreamResponder{ - signer: signer, - endError: endError, - idList: idList, + checkRes := func(t testing.TB, all [][]oid.ID, nResps int, res []oid.ID) { + require.Equal(t, firstNIDLists(all, nResps), res) } -} - -type singleStreamResponder struct { - signer neofscrypto.Signer - n int - endError error - idList [][]oid.ID -} - -func (s *singleStreamResponder) Read(resp *v2object.SearchResponse) error { - if s.n >= len(s.idList) { - if s.endError != nil { - return s.endError - } - return ErrUnexpectedReadCall + checkEmptyRes := func(t testing.TB, res []oid.ID) { + checkRes(t, nil, 0, res) } - - var body v2object.SearchResponseBody - - if s.idList[s.n] != nil { - ids := make([]refs.ObjectID, len(s.idList[s.n])) - for i := range s.idList[s.n] { - s.idList[s.n][i].WriteToV2(&ids[i]) + t.Run("invalid signer", func(t *testing.T) { + c, err := New(anyValidURI, Options{}) 
+ require.NoError(t, err) + res, err := call(ctx, c, cidtest.ID(), nil, SelectObjectsOptions{}, nil) + require.ErrorIs(t, err, errMissingSigner) + checkEmptyRes(t, res) + }) + t.Run("OK", func(t *testing.T) { + for _, testCase := range []struct { + name string + setOpts func(srv *searchObjectsServer, opts *SelectObjectsOptions) + }{ + {name: "default", setOpts: func(srv *searchObjectsServer, opts *SelectObjectsOptions) {}}, + {name: "with session", setOpts: func(srv *searchObjectsServer, opts *SelectObjectsOptions) { + so := sessiontest.Object() + opts.WithinSession(so) + srv.session = &so + }}, + {name: "with bearer token", setOpts: func(srv *searchObjectsServer, opts *SelectObjectsOptions) { + bt := bearertest.Token() + opts.WithBearerToken(bt) + srv.bearerToken = &bt + }}, + {name: "no forwarding", setOpts: func(srv *searchObjectsServer, opts *SelectObjectsOptions) { + srv.local = true + opts.PreventForwarding() + }}, + } { + t.Run(testCase.name, func(t *testing.T) { + assertErr := func(err error) { require.NoError(t, err) } + srv, c, handlerCalled := bindClientServerForObjectSearch(t, assertErr) + var opts SelectObjectsOptions + testCase.setOpts(srv, &opts) + res, err := call(ctx, c, srv.cnr, srv.clientSigner, opts, srv.filters) + assertErr(err) + checkRes(t, srv.idLists, len(srv.idLists), res) + require.True(t, *handlerCalled) + + srv.idLists = nil + res, err = call(ctx, c, srv.cnr, srv.clientSigner, opts, srv.filters) + assertErr(err) + checkEmptyRes(t, res) + }) } - body.SetIDList(ids) - } - resp.SetBody(&body) + }) + t.Run("fail", func(t *testing.T) { + t.Run("wrong stream flow", func(t *testing.T) { + t.Run("empty stream", func(t *testing.T) { + assertErr := func(err error) { require.EqualError(t, err, "stream ended without a status response") } + srv, c, handlerCalled := bindClientServerForObjectSearch(t, assertErr) + srv.emptyStream = true + res, err := call(ctx, c, srv.cnr, srv.clientSigner, SelectObjectsOptions{}, srv.filters) + assertErr(err) + 
checkEmptyRes(t, res) + require.True(t, *handlerCalled) + }) + t.Run("message after status error", func(t *testing.T) { + assertErr := func(err error) { + require.EqualError(t, err, "stream is not completed after the message #2 which must be the last one") + } + srv, c, handlerCalled := bindClientServerForObjectSearch(t, assertErr) + srv.modifyResp = func(n int, r *apiobject.SearchResponse) error { + if n == 2 { + for r.MetaHeader.Status.Code == 0 { + r.MetaHeader.Status.Code = rand.Uint32() + } + } + return nil + } + res, err := call(ctx, c, srv.cnr, srv.clientSigner, SelectObjectsOptions{}, srv.filters) + assertErr(err) + require.True(t, *handlerCalled) + checkRes(t, srv.idLists, 2, res) + }) + t.Run("message after empty payload", func(t *testing.T) { + assertErr := func(err error) { + require.EqualError(t, err, "stream is not completed after the message #0 which must be the last one") + } + srv, c, handlerCalled := bindClientServerForObjectSearch(t, assertErr) + srv.modifyResp = func(n int, r *apiobject.SearchResponse) error { + if n == 0 { + r.Body = nil + } + return nil + } + res, err := call(ctx, c, srv.cnr, srv.clientSigner, SelectObjectsOptions{}, srv.filters) + assertErr(err) + checkEmptyRes(t, res) + require.True(t, *handlerCalled) + srv.modifyResp = func(n int, r *apiobject.SearchResponse) error { + if n == 0 { + r.Body.IdList = nil + } + return nil + } + res, err = call(ctx, c, srv.cnr, srv.clientSigner, SelectObjectsOptions{}, srv.filters) + assertErr(err) + checkEmptyRes(t, res) + srv.modifyResp = func(n int, r *apiobject.SearchResponse) error { + if n == 0 { + r.Body.IdList = []*refs.ObjectID{} + } + return nil + } + res, err = call(ctx, c, srv.cnr, srv.clientSigner, SelectObjectsOptions{}, srv.filters) + assertErr(err) + checkEmptyRes(t, res) + }) + }) + t.Run("sign request", func(t *testing.T) { + assertErr := func(err error) { require.ErrorContains(t, err, errSignRequest) } + srv, c, handlerCalled := bindClientServerForObjectSearch(t, assertErr) + 
srv.sleepDur = 0 + res, err := call(ctx, c, srv.cnr, neofscryptotest.FailSigner(srv.clientSigner), SelectObjectsOptions{}, srv.filters) + assertErr(err) + checkEmptyRes(t, res) + require.True(t, *handlerCalled) + }) + t.Run("transport", func(t *testing.T) { + assertErr := func(err error) { require.ErrorContains(t, err, errTransport) } + srv, c, handlerCalled := bindClientServerForObjectSearch(t, assertErr) + srv.sleepDur = 0 + require.NoError(t, c.conn.Close()) + res, err := call(ctx, c, srv.cnr, srv.clientSigner, SelectObjectsOptions{}, srv.filters) + assertErr(err) + checkEmptyRes(t, res) + require.True(t, *handlerCalled) + + for nResp := range srv.idLists { + assertErr = func(err error) { + require.ErrorContains(t, err, errTransport) + require.ErrorContains(t, err, "any transport error") + require.ErrorContains(t, err, fmt.Sprintf("while reading response #%d", nResp)) + } + srv, c, handlerCalled := bindClientServerForObjectSearch(t, assertErr) + srv.sleepDur = 0 + srv.modifyResp = func(n int, r *apiobject.SearchResponse) error { + if n == nResp { + return errors.New("any transport error") + } + return nil + } + res, err = call(ctx, c, srv.cnr, srv.clientSigner, SelectObjectsOptions{}, srv.filters) + assertErr(err) + checkRes(t, srv.idLists, nResp, res) + require.True(t, *handlerCalled) + } + }) + t.Run("invalid response signature", func(t *testing.T) { + for i, testCase := range []struct { + err string + corrupt func(*apiobject.SearchResponse) + }{ + {err: "missing verification header", + corrupt: func(r *apiobject.SearchResponse) { r.VerifyHeader = nil }, + }, + {err: "missing body signature", + corrupt: func(r *apiobject.SearchResponse) { r.VerifyHeader.BodySignature = nil }, + }, + {err: "missing signature of the meta header", + corrupt: func(r *apiobject.SearchResponse) { r.VerifyHeader.MetaSignature = nil }, + }, + {err: "missing signature of the origin verification header", + corrupt: func(r *apiobject.SearchResponse) { r.VerifyHeader.OriginSignature = nil 
}, + }, + {err: "verify body signature: missing public key", + corrupt: func(r *apiobject.SearchResponse) { r.VerifyHeader.BodySignature.Key = nil }, + }, + {err: "verify signature of the meta header: missing public key", + corrupt: func(r *apiobject.SearchResponse) { r.VerifyHeader.MetaSignature.Key = nil }, + }, + {err: "verify signature of the origin verification header: missing public key", + corrupt: func(r *apiobject.SearchResponse) { r.VerifyHeader.OriginSignature.Key = nil }, + }, + {err: "verify body signature: decode public key from binary", + corrupt: func(r *apiobject.SearchResponse) { + r.VerifyHeader.BodySignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the meta header: decode public key from binary", + corrupt: func(r *apiobject.SearchResponse) { + r.VerifyHeader.MetaSignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the origin verification header: decode public key from binary", + corrupt: func(r *apiobject.SearchResponse) { + r.VerifyHeader.OriginSignature.Key = []byte("not a public key") + }, + }, + {err: "verify body signature: invalid scheme -1", + corrupt: func(r *apiobject.SearchResponse) { r.VerifyHeader.BodySignature.Scheme = -1 }, + }, + {err: "verify body signature: unsupported scheme 3", + corrupt: func(r *apiobject.SearchResponse) { r.VerifyHeader.BodySignature.Scheme = 3 }, + }, + {err: "verify signature of the meta header: unsupported scheme 3", + corrupt: func(r *apiobject.SearchResponse) { r.VerifyHeader.MetaSignature.Scheme = 3 }, + }, + {err: "verify signature of the origin verification header: unsupported scheme 3", + corrupt: func(r *apiobject.SearchResponse) { r.VerifyHeader.OriginSignature.Scheme = 3 }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apiobject.SearchResponse) { r.VerifyHeader.BodySignature.Sign[0]++ }, + }, + {err: "verify signature of the meta header: signature mismatch", + corrupt: func(r *apiobject.SearchResponse) { 
r.VerifyHeader.MetaSignature.Sign[0]++ }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apiobject.SearchResponse) { r.VerifyHeader.OriginSignature.Sign[0]++ }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apiobject.SearchResponse) { + r.VerifyHeader.BodySignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the meta header: signature mismatch", + corrupt: func(r *apiobject.SearchResponse) { + r.VerifyHeader.MetaSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apiobject.SearchResponse) { + r.VerifyHeader.OriginSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + } { + assertErr := func(err error) { + require.ErrorContains(t, err, "invalid response") + require.ErrorContains(t, err, errResponseSignature, [2]any{i, testCase.err}) + require.ErrorContains(t, err, testCase.err, [2]any{i, testCase.err}) + } + srv, c, handlerCalled := bindClientServerForObjectSearch(t, assertErr) + for nResp := range srv.idLists { + srv.corruptRespSig = func(n int, r *apiobject.SearchResponse) { + if n == nResp { + testCase.corrupt(r) + } + } + res, err := call(ctx, c, srv.cnr, srv.clientSigner, SelectObjectsOptions{}, srv.filters) + assertErr(err) + require.ErrorContains(t, err, fmt.Sprintf("invalid response #%d", nResp)) + checkRes(t, srv.idLists, nResp, res) + require.True(t, *handlerCalled) + } + } + }) + t.Run("invalid response status", func(t *testing.T) { + assertErr := func(err error) { + require.ErrorContains(t, err, "invalid response") + require.ErrorContains(t, err, errInvalidResponseStatus) + require.ErrorContains(t, err, "details attached but not supported") + } + srv, c, handlerCalled := bindClientServerForObjectSearch(t, assertErr) + 
for nResp := range srv.idLists { + srv.modifyResp = func(n int, r *apiobject.SearchResponse) error { + if n == nResp { + r.MetaHeader.Status = &status.Status{Code: status.InternalServerError, Details: make([]*status.Status_Detail, 1)} + } + return nil + } + res, err := call(ctx, c, srv.cnr, srv.clientSigner, SelectObjectsOptions{}, srv.filters) + assertErr(err) + require.ErrorContains(t, err, fmt.Sprintf("invalid response #%d", nResp)) + checkRes(t, srv.idLists, nResp, res) + require.True(t, *handlerCalled) + } + }) + t.Run("status errors", func(t *testing.T) { + for _, testCase := range []struct { + code uint32 + errConst error + errVar any + }{ + {code: 1 << 32 / 2}, + {code: status.InternalServerError, errConst: apistatus.ErrServerInternal, errVar: new(apistatus.InternalServerError)}, + {code: status.SignatureVerificationFail, errConst: apistatus.ErrSignatureVerification, errVar: new(apistatus.SignatureVerificationFailure)}, + {code: status.ContainerNotFound, errConst: apistatus.ErrContainerNotFound, errVar: new(apistatus.ContainerNotFound)}, + {code: status.ObjectAccessDenied, errConst: apistatus.ErrObjectAccessDenied, errVar: new(apistatus.ObjectAccessDenied)}, + {code: status.SessionTokenExpired, errConst: apistatus.ErrSessionTokenExpired, errVar: new(apistatus.SessionTokenExpired)}, + } { + assertErr := func(err error) { + require.ErrorIs(t, err, apistatus.Error, testCase) + require.ErrorContains(t, err, "any message", testCase) + if testCase.errConst != nil { + require.ErrorIs(t, err, testCase.errConst, testCase) + } + if testCase.errVar != nil { + require.ErrorAs(t, err, testCase.errVar, testCase) + } + } + srv, c, handlerCalled := bindClientServerForObjectSearch(t, assertErr) + srv.modifyResp = func(n int, r *apiobject.SearchResponse) error { + if n == len(srv.idLists)-1 { + r.MetaHeader.Status = &status.Status{Code: testCase.code, Message: "any message"} + } + return nil + } + res, err := call(ctx, c, srv.cnr, srv.clientSigner, SelectObjectsOptions{}, 
srv.filters) + assertErr(err) + checkRes(t, srv.idLists, len(srv.idLists)-1, res) + require.True(t, *handlerCalled, testCase) + } + }) + t.Run("response body", func(t *testing.T) { + t.Run("missing", func(t *testing.T) { + assertErr := func(err error) { + require.ErrorContains(t, err, "invalid response") + require.ErrorContains(t, err, "empty object ID list is only allowed in the first stream message") + } + srv, c, handlerCalled := bindClientServerForObjectSearch(t, assertErr) + for nResp := 1; nResp < len(srv.idLists); nResp++ { + srv.modifyResp = func(n int, r *apiobject.SearchResponse) error { + if n == nResp { + r.Body = nil + } + return nil + } + res, err := call(ctx, c, srv.cnr, srv.clientSigner, SelectObjectsOptions{}, srv.filters) + assertErr(err) + require.ErrorContains(t, err, fmt.Sprintf("invalid response #%d", nResp)) + checkRes(t, srv.idLists, nResp, res) + require.True(t, *handlerCalled) + srv.modifyResp = func(n int, r *apiobject.SearchResponse) error { + if n == nResp { + r.Body.IdList = nil + } + return nil + } + res, err = call(ctx, c, srv.cnr, srv.clientSigner, SelectObjectsOptions{}, srv.filters) + assertErr(err) + checkRes(t, srv.idLists, nResp, res) + srv.modifyResp = func(n int, r *apiobject.SearchResponse) error { + if n == nResp { + r.Body.IdList = []*refs.ObjectID{} + } + return nil + } + res, err = call(ctx, c, srv.cnr, srv.clientSigner, SelectObjectsOptions{}, srv.filters) + assertErr(err) + checkRes(t, srv.idLists, nResp, res) + } + }) + t.Run("ID list", func(t *testing.T) { + validID1 := oidtest.ID() + validID2 := oidtest.ChangeID(validID1) + var mValidID1, mValidID2 refs.ObjectID + validID1.WriteToV2(&mValidID1) + validID2.WriteToV2(&mValidID2) + for i, testCase := range []struct { + err string + corrupt func(*apiobject.SearchResponse_Body) + }{ + {err: "invalid element #1: missing value field", corrupt: func(body *apiobject.SearchResponse_Body) { + body.IdList = []*refs.ObjectID{&mValidID1, nil, &mValidID2} + }}, + {err: "invalid 
element #1: missing value field", corrupt: func(body *apiobject.SearchResponse_Body) { + body.IdList = []*refs.ObjectID{&mValidID1, {Value: nil}, &mValidID2} + }}, + {err: "invalid element #1: missing value field", corrupt: func(body *apiobject.SearchResponse_Body) { + body.IdList = []*refs.ObjectID{&mValidID1, {Value: []byte{}}, &mValidID2} + }}, + {err: "invalid element #1: invalid value length 31", corrupt: func(body *apiobject.SearchResponse_Body) { + body.IdList = []*refs.ObjectID{&mValidID1, {Value: make([]byte, 31)}, &mValidID2} + }}, + } { + assertErr := func(err error) { + require.ErrorContains(t, err, "invalid response", i) + require.ErrorContains(t, err, fmt.Sprintf(": invalid body: invalid field (object ID list): %s", testCase.err), i) + } + srv, c, handlerCalled := bindClientServerForObjectSearch(t, assertErr) + for nResp := range srv.idLists { + srv.modifyResp = func(n int, r *apiobject.SearchResponse) error { + if n == nResp { + testCase.corrupt(r.Body) + } + return nil + } + res, err := call(ctx, c, srv.cnr, srv.clientSigner, SelectObjectsOptions{}, srv.filters) + assertErr(err) + require.ErrorContains(t, err, fmt.Sprintf("invalid response #%d", nResp), i) + require.Equal(t, append(firstNIDLists(srv.idLists, nResp), validID1), res) + require.NotContains(t, res, validID2) + require.True(t, *handlerCalled, testCase.err, i) + } + } + }) + }) + }) + t.Run("response info handler", func(t *testing.T) { + t.Run("OK", func(t *testing.T) { + assertErr := func(err error) { require.NoError(t, err) } + var r ResponseMetaInfo + callCounter := 0 + srv, c, reqHandlerCalled := bindClientServerForObjectSearchWithOpts(t, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + callCounter++ + r = info + return nil + }) + }) + srv.epoch = 3598503 + res, err := call(ctx, c, srv.cnr, srv.clientSigner, SelectObjectsOptions{}, srv.filters) + assertErr(err) + checkRes(t, srv.idLists, len(srv.idLists), res) + 
require.EqualValues(t, 2, callCounter) // + on dial + require.Equal(t, srv.epoch, r.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), r.ResponderKey()) + require.True(t, *reqHandlerCalled) + }) + t.Run("fail", func(t *testing.T) { + assertErr := func(err error) { require.EqualError(t, err, "intercept response info: some handler error") } + callCounter := 0 + var r ResponseMetaInfo + srv, c, handlerCalled := bindClientServerForObjectSearchWithOpts(t, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + callCounter++ + if callCounter == 1 { // dial + return nil + } + r = info + return errors.New("some handler error") + }) + }) + srv.epoch = 4386380643 + res, err := call(ctx, c, srv.cnr, srv.clientSigner, SelectObjectsOptions{}, srv.filters) + assertErr(err) + checkEmptyRes(t, res) + require.EqualValues(t, 2, callCounter) // + on dial + require.Equal(t, srv.epoch, r.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), r.ResponderKey()) + require.True(t, *handlerCalled) + }) + }) +} - err := signServiceMessage(s.signer, resp, nil) - if err != nil { - return err - } +func TestClient_SelectObjects(t *testing.T) { + testObjectSearchingMethod(t, func(ctx context.Context, c *Client, cnr cid.ID, signer neofscrypto.Signer, opts SelectObjectsOptions, filters []object.SearchFilter) ([]oid.ID, error) { + return c.SelectObjects(ctx, cnr, signer, opts, filters) + }) +} - s.n++ - return nil +func TestClient_ForEachSelectedObject(t *testing.T) { + testObjectSearchingMethod(t, func(ctx context.Context, c *Client, cnr cid.ID, signer neofscrypto.Signer, opts SelectObjectsOptions, filters []object.SearchFilter) ([]oid.ID, error) { + var res []oid.ID + return res, c.ForEachSelectedObject(ctx, cnr, signer, opts, filters, func(id oid.ID) bool { + res = append(res, id) + return true + }) + }) + t.Run("break", func(t *testing.T) { + assertErr := func(err error) { 
require.NoError(t, err) } + srv, c, _ := bindClientServerForObjectSearch(t, assertErr) + srv.idLists = [][]oid.ID{oidtest.NIDs(2), oidtest.NIDs(3), oidtest.NIDs(1)} + callCounter := 0 + err := c.ForEachSelectedObject(context.Background(), srv.cnr, srv.clientSigner, SelectObjectsOptions{}, srv.filters, func(id oid.ID) bool { + switch callCounter { + default: + return false // break + case 0: + require.Equal(t, srv.idLists[0][0], id) + case 1: + require.Equal(t, srv.idLists[0][1], id) + case 2: + require.Equal(t, srv.idLists[1][0], id) + case 3: + require.Equal(t, srv.idLists[1][1], id) + } + callCounter++ + return true // continue + }) + assertErr(err) + require.EqualValues(t, 4, callCounter) + }) } diff --git a/client/object_test.go b/client/object_test.go new file mode 100644 index 000000000..0fc3c113e --- /dev/null +++ b/client/object_test.go @@ -0,0 +1,146 @@ +package client + +import ( + "context" + "math" + + apiobject "github.com/nspcc-dev/neofs-sdk-go/api/object" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + apisession "github.com/nspcc-dev/neofs-sdk-go/api/session" +) + +type noOtherObjectCalls struct{} + +func (noOtherObjectCalls) Get(*apiobject.GetRequest, apiobject.ObjectService_GetServer) error { + panic("must not be called") +} + +func (noOtherObjectCalls) Put(apiobject.ObjectService_PutServer) error { + panic("must not be called") +} + +func (noOtherObjectCalls) Delete(context.Context, *apiobject.DeleteRequest) (*apiobject.DeleteResponse, error) { + panic("must not be called") +} + +func (noOtherObjectCalls) Head(context.Context, *apiobject.HeadRequest) (*apiobject.HeadResponse, error) { + panic("must not be called") +} + +func (noOtherObjectCalls) Search(*apiobject.SearchRequest, apiobject.ObjectService_SearchServer) error { + panic("must not be called") +} + +func (noOtherObjectCalls) GetRange(*apiobject.GetRangeRequest, apiobject.ObjectService_GetRangeServer) error { + panic("must not be called") +} + +func (noOtherObjectCalls) 
GetRangeHash(context.Context, *apiobject.GetRangeHashRequest) (*apiobject.GetRangeHashResponse, error) { + panic("must not be called") +} + +func (noOtherObjectCalls) Replicate(context.Context, *apiobject.ReplicateRequest) (*apiobject.ReplicateResponse, error) { + panic("must not be called") +} + +var invalidObjectHeaderTestCases = []struct { + err string + corrupt func(*apiobject.Header) +}{ + {err: "invalid type field -1", corrupt: func(h *apiobject.Header) { h.ObjectType = -1 }}, + {err: "missing container", corrupt: func(h *apiobject.Header) { h.ContainerId = nil }}, + {err: "invalid container: missing value field", corrupt: func(h *apiobject.Header) { h.ContainerId.Value = nil }}, + {err: "invalid container: invalid value length 31", corrupt: func(h *apiobject.Header) { h.ContainerId.Value = make([]byte, 31) }}, + {err: "missing owner", corrupt: func(h *apiobject.Header) { h.OwnerId = nil }}, + {err: "invalid owner: missing value field", corrupt: func(h *apiobject.Header) { h.OwnerId.Value = nil }}, + {err: "invalid owner: invalid value length 24", corrupt: func(h *apiobject.Header) { h.OwnerId.Value = make([]byte, 24) }}, + {err: "invalid owner: invalid prefix byte 0x42, expected 0x35", corrupt: func(h *apiobject.Header) { h.OwnerId.Value[0] = 0x42 }}, + {err: "invalid owner: value checksum mismatch", corrupt: func(h *apiobject.Header) { h.OwnerId.Value[len(h.OwnerId.Value)-1]++ }}, + {err: "invalid payload checksum: missing value", corrupt: func(h *apiobject.Header) { h.PayloadHash.Sum = nil }}, + {err: "invalid payload homomorphic checksum: missing value", corrupt: func(h *apiobject.Header) { h.HomomorphicHash.Sum = nil }}, + {err: "invalid session: missing token body", corrupt: func(h *apiobject.Header) { h.SessionToken.Body = nil }}, + {err: "invalid session: missing session ID", corrupt: func(h *apiobject.Header) { h.SessionToken.Body.Id = nil }}, + {err: "invalid session: missing session ID", corrupt: func(h *apiobject.Header) { h.SessionToken.Body.Id = 
[]byte{} }}, + {err: "invalid session: invalid session ID: invalid UUID (got 15 bytes)", corrupt: func(h *apiobject.Header) { h.SessionToken.Body.Id = make([]byte, 15) }}, + {err: "invalid session: invalid session UUID version 3", corrupt: func(h *apiobject.Header) { h.SessionToken.Body.Id[6] = 3 << 4 }}, + {err: "invalid session: missing session issuer", corrupt: func(h *apiobject.Header) { h.SessionToken.Body.OwnerId = nil }}, + {err: "invalid session: invalid session issuer: missing value field", corrupt: func(h *apiobject.Header) { h.SessionToken.Body.OwnerId.Value = nil }}, + {err: "invalid session: invalid session issuer: missing value field", corrupt: func(h *apiobject.Header) { h.SessionToken.Body.OwnerId.Value = []byte{} }}, + {err: "invalid session: invalid session issuer: invalid value length 26", corrupt: func(h *apiobject.Header) { h.SessionToken.Body.OwnerId.Value = make([]byte, 26) }}, + {err: "invalid session: invalid session issuer: invalid prefix byte 0x43, expected 0x35", corrupt: func(h *apiobject.Header) { h.SessionToken.Body.OwnerId.Value[0] = 0x43 }}, + {err: "invalid session: invalid session issuer: value checksum mismatch", corrupt: func(h *apiobject.Header) { + h.SessionToken.Body.OwnerId.Value[len(h.SessionToken.Body.OwnerId.Value)-1]++ + }}, + {err: "invalid session: missing token lifetime", corrupt: func(h *apiobject.Header) { h.SessionToken.Body.Lifetime = nil }}, + {err: "invalid session: missing session public key", corrupt: func(h *apiobject.Header) { h.SessionToken.Body.SessionKey = nil }}, + {err: "invalid session: missing session public key", corrupt: func(h *apiobject.Header) { h.SessionToken.Body.SessionKey = []byte{} }}, + {err: "invalid session: invalid body signature: missing public key", corrupt: func(h *apiobject.Header) { h.SessionToken.Signature.Key = nil }}, + {err: "invalid session: invalid body signature: missing public key", corrupt: func(h *apiobject.Header) { h.SessionToken.Signature.Key = []byte{} }}, + {err: 
"invalid session: invalid body signature: decode public key from binary", corrupt: func(h *apiobject.Header) { h.SessionToken.Signature.Key = make([]byte, 32) }}, + {err: "invalid session: invalid body signature: missing signature", corrupt: func(h *apiobject.Header) { h.SessionToken.Signature.Sign = nil }}, + {err: "invalid session: invalid body signature: missing signature", corrupt: func(h *apiobject.Header) { h.SessionToken.Signature.Sign = []byte{} }}, + {err: "invalid session: invalid body signature: unsupported scheme 2147483647", corrupt: func(h *apiobject.Header) { h.SessionToken.Signature.Scheme = math.MaxInt32 }}, + {err: "invalid session: missing session context", corrupt: func(h *apiobject.Header) { h.SessionToken.Body.Context = nil }}, + {err: "invalid session: wrong context field", corrupt: func(h *apiobject.Header) { h.SessionToken.Body.Context = new(apisession.SessionToken_Body_Container) }}, + {err: "invalid session: invalid context: invalid target container: missing value field", corrupt: func(h *apiobject.Header) { + h.SessionToken.Body.Context.(*apisession.SessionToken_Body_Object).Object.Target.Container.Value = nil + }}, + {err: "invalid session: invalid context: invalid target container: missing value field", corrupt: func(h *apiobject.Header) { + h.SessionToken.Body.Context.(*apisession.SessionToken_Body_Object).Object.Target.Container.Value = []byte{} + }}, + {err: "invalid session: invalid context: invalid target container: invalid value length 31", corrupt: func(h *apiobject.Header) { + h.SessionToken.Body.Context.(*apisession.SessionToken_Body_Object).Object.Target.Container.Value = make([]byte, 31) + }}, + {err: "invalid session: invalid context: invalid target object #1: missing value field", corrupt: func(h *apiobject.Header) { + h.SessionToken.Body.Context.(*apisession.SessionToken_Body_Object).Object.Target.Objects = []*refs.ObjectID{ + {Value: make([]byte, 32)}, {Value: nil}} + }}, + {err: "invalid session: invalid context: 
invalid target object #1: missing value field", corrupt: func(h *apiobject.Header) { + h.SessionToken.Body.Context.(*apisession.SessionToken_Body_Object).Object.Target.Objects = []*refs.ObjectID{ + {Value: make([]byte, 32)}, {Value: nil}} + }}, + {err: "invalid session: invalid context: invalid target object #1: invalid value length 31", corrupt: func(h *apiobject.Header) { + h.SessionToken.Body.Context.(*apisession.SessionToken_Body_Object).Object.Target.Objects = []*refs.ObjectID{ + {Value: make([]byte, 32)}, {Value: make([]byte, 31)}} + }}, + {err: "invalid split-chain ID: wrong length 15", corrupt: func(h *apiobject.Header) { h.Split.SplitId = make([]byte, 15) }}, + {err: "invalid split-chain ID: wrong version #3", corrupt: func(h *apiobject.Header) { + h.Split.SplitId = make([]byte, 16) + h.Split.SplitId[6] = 3 << 4 + }}, + {err: "invalid parent ID: missing value field", corrupt: func(h *apiobject.Header) { h.Split.Parent.Value = nil }}, + {err: "invalid parent ID: missing value field", corrupt: func(h *apiobject.Header) { h.Split.Parent.Value = []byte{} }}, + {err: "invalid parent ID: invalid value length 31", corrupt: func(h *apiobject.Header) { h.Split.Parent.Value = make([]byte, 31) }}, + {err: "invalid previous split-chain element: missing value field", corrupt: func(h *apiobject.Header) { h.Split.Previous.Value = nil }}, + {err: "invalid previous split-chain element: missing value field", corrupt: func(h *apiobject.Header) { h.Split.Previous.Value = []byte{} }}, + {err: "invalid previous split-chain element: invalid value length 31", corrupt: func(h *apiobject.Header) { h.Split.Previous.Value = make([]byte, 31) }}, + {err: "invalid parent signature: missing public key", corrupt: func(h *apiobject.Header) { h.Split.ParentSignature.Key = nil }}, + {err: "invalid parent signature: missing public key", corrupt: func(h *apiobject.Header) { h.Split.ParentSignature.Key = []byte{} }}, + {err: "invalid parent signature: decode public key from binary", corrupt: 
func(h *apiobject.Header) { h.Split.ParentSignature.Key = make([]byte, 32) }}, + {err: "invalid parent signature: missing signature", corrupt: func(h *apiobject.Header) { h.Split.ParentSignature.Sign = nil }}, + {err: "invalid parent signature: missing signature", corrupt: func(h *apiobject.Header) { h.Split.ParentSignature.Sign = []byte{} }}, + {err: "invalid parent signature: unsupported scheme 2147483647", corrupt: func(h *apiobject.Header) { h.Split.ParentSignature.Scheme = math.MaxInt32 }}, + {err: "invalid child split-chain element #1: missing value field", corrupt: func(h *apiobject.Header) { h.Split.Children = []*refs.ObjectID{{Value: make([]byte, 32)}, nil} }}, + {err: "invalid child split-chain element #1: missing value field", corrupt: func(h *apiobject.Header) { h.Split.Children = []*refs.ObjectID{{Value: make([]byte, 32)}, {}} }}, + {err: "invalid child split-chain element #1: invalid value length 31", corrupt: func(h *apiobject.Header) { + h.Split.Children = []*refs.ObjectID{{Value: make([]byte, 32)}, {Value: make([]byte, 31)}} + }}, + {err: "invalid first split-chain element: missing value field", corrupt: func(h *apiobject.Header) { h.Split.First.Value = nil }}, + {err: "invalid first split-chain element: missing value field", corrupt: func(h *apiobject.Header) { h.Split.First.Value = []byte{} }}, + {err: "invalid first split-chain element: invalid value length 31", corrupt: func(h *apiobject.Header) { h.Split.First.Value = make([]byte, 31) }}, + {err: "invalid attribute #1: missing key", corrupt: func(h *apiobject.Header) { + h.Attributes = []*apiobject.Header_Attribute{{Key: "k1", Value: "v1"}, {Key: "", Value: "v2"}} + }}, + {err: "invalid attribute #1 (k2): missing value", corrupt: func(h *apiobject.Header) { + h.Attributes = []*apiobject.Header_Attribute{{Key: "k1", Value: "v1"}, {Key: "k2", Value: ""}} + }}, + {err: "multiple attributes with key=k2", corrupt: func(h *apiobject.Header) { + h.Attributes = []*apiobject.Header_Attribute{{Key: 
"k1", Value: "v1"}, {Key: "k2", Value: "v2"}, {Key: "k3", Value: "v3"}, {Key: "k2", Value: "v4"}} + }}, + {err: "invalid expiration attribute (#1): invalid integer", corrupt: func(h *apiobject.Header) { + h.Attributes = []*apiobject.Header_Attribute{{Key: "k1", Value: "v1"}, {Key: "__NEOFS__EXPIRATION_EPOCH", Value: "not a number"}} + }}, + {err: "invalid timestamp attribute (#1): invalid integer", corrupt: func(h *apiobject.Header) { + h.Attributes = []*apiobject.Header_Attribute{{Key: "k1", Value: "v1"}, {Key: "Timestamp", Value: "not a number"}} + }}, +} diff --git a/client/reputation.go b/client/reputation.go index a0e70e3b0..fe4d2ab04 100644 --- a/client/reputation.go +++ b/client/reputation.go @@ -2,161 +2,153 @@ package client import ( "context" + "errors" + "fmt" + "time" - v2reputation "github.com/nspcc-dev/neofs-api-go/v2/reputation" - rpcapi "github.com/nspcc-dev/neofs-api-go/v2/rpc" - "github.com/nspcc-dev/neofs-api-go/v2/rpc/client" + apireputation "github.com/nspcc-dev/neofs-sdk-go/api/reputation" + apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" + neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" "github.com/nspcc-dev/neofs-sdk-go/reputation" "github.com/nspcc-dev/neofs-sdk-go/stat" ) -var ( - // special variables for test purposes only, to overwrite real RPC calls. - rpcAPIAnnounceIntermediateResult = rpcapi.AnnounceIntermediateResult - rpcAPIAnnounceLocalTrust = rpcapi.AnnounceLocalTrust -) - -// PrmAnnounceLocalTrust groups optional parameters of AnnounceLocalTrust operation. -type PrmAnnounceLocalTrust struct { - prmCommonMeta -} +// SendLocalTrustsOptions groups optional parameters of [Client.SendLocalTrusts] +// operation. +type SendLocalTrustsOptions struct{} -// AnnounceLocalTrust sends client's trust values to the NeoFS network participants. -// -// Any errors (local or remote, including returned status codes) are returned as Go errors, -// see [apistatus] package for NeoFS-specific error types. 
-// -// Context is required and must not be nil. It is used for network communication. -// -// Return errors: -// - [ErrZeroEpoch] -// - [ErrMissingTrusts] +// SendLocalTrusts sends client's trust values to the NeoFS network participants +// collected for a given epoch. The trust set must not be empty. // -// Parameter epoch must not be zero. -// Parameter trusts must not be empty. -func (c *Client) AnnounceLocalTrust(ctx context.Context, epoch uint64, trusts []reputation.Trust, prm PrmAnnounceLocalTrust) error { - var err error - defer func() { - c.sendStatistic(stat.MethodAnnounceLocalTrust, err)() - }() - - // check parameters - switch { - case epoch == 0: - err = ErrZeroEpoch - return err - case len(trusts) == 0: - err = ErrMissingTrusts - return err +// SendLocalTrusts is used for system needs and is not intended to be called by +// regular users. +func (c *Client) SendLocalTrusts(ctx context.Context, epoch uint64, trusts []reputation.Trust, _ SendLocalTrustsOptions) error { + if len(trusts) == 0 { + return errors.New("missing trusts") } - // form request body - reqBody := new(v2reputation.AnnounceLocalTrustRequestBody) - reqBody.SetEpoch(epoch) - - trustList := make([]v2reputation.Trust, len(trusts)) - - for i := range trusts { - trusts[i].WriteToV2(&trustList[i]) + var err error + if c.handleAPIOpResult != nil { + defer func(start time.Time) { + c.handleAPIOpResult(c.serverPubKey, c.endpoint, stat.MethodAnnounceLocalTrust, time.Since(start), err) + }(time.Now()) } - reqBody.SetTrusts(trustList) - // form request - var req v2reputation.AnnounceLocalTrustRequest - - req.SetBody(reqBody) - - // init call context - - var ( - cc contextCall - ) - - c.initCallContext(&cc) - cc.meta = prm.prmCommonMeta - cc.req = &req - cc.call = func() (responseV2, error) { - return rpcAPIAnnounceLocalTrust(&c.c, &req, client.WithContext(ctx)) + req := &apireputation.AnnounceLocalTrustRequest{ + Body: &apireputation.AnnounceLocalTrustRequest_Body{ + Epoch: epoch, + Trusts: 
make([]*apireputation.Trust, len(trusts)), + }, } - - // process call - if !cc.processCall() { - err = cc.err + for i := range trusts { + req.Body.Trusts[i] = new(apireputation.Trust) + trusts[i].WriteToV2(req.Body.Trusts[i]) + } + // FIXME: balance requests need small fixed-size buffers for encoding, its makes + // no sense to mosh them with other buffers + buf := c.signBuffers.Get().(*[]byte) + defer c.signBuffers.Put(buf) + if req.VerifyHeader, err = neofscrypto.SignRequest(c.signer, req, req.Body, *buf); err != nil { + err = fmt.Errorf("%s: %w", errSignRequest, err) // for closure above return err } - return nil -} + // send request + resp, err := c.transport.reputation.AnnounceLocalTrust(ctx, req) + if err != nil { + err = fmt.Errorf("%s: %w", errTransport, err) // for closure above + return err + } -// PrmAnnounceIntermediateTrust groups optional parameters of AnnounceIntermediateTrust operation. -type PrmAnnounceIntermediateTrust struct { - prmCommonMeta + // intercept response info + if c.interceptAPIRespInfo != nil { + if err = c.interceptAPIRespInfo(ResponseMetaInfo{ + key: resp.GetVerifyHeader().GetBodySignature().GetKey(), + epoch: resp.GetMetaHeader().GetEpoch(), + }); err != nil { + err = fmt.Errorf("%s: %w", errInterceptResponseInfo, err) // for closure above + return err + } + } - iter uint32 + // verify response integrity + if err = neofscrypto.VerifyResponse(resp, resp.Body); err != nil { + err = fmt.Errorf("%s: %w", errResponseSignature, err) // for closure above + return err + } + sts, err := apistatus.ErrorFromV2(resp.GetMetaHeader().GetStatus()) + if err != nil { + err = fmt.Errorf("%s: %w", errInvalidResponseStatus, err) // for closure above + } else if sts != nil { + err = sts // for closure above + } + return err } -// SetIteration sets current sequence number of the client's calculation algorithm. -// By default, corresponds to initial (zero) iteration. 
-func (x *PrmAnnounceIntermediateTrust) SetIteration(iter uint32) { - x.iter = iter -} +// SendIntermediateTrustOptions groups optional parameters of [Client.SendIntermediateTrust] +// operation. +type SendIntermediateTrustOptions struct{} -// AnnounceIntermediateTrust sends global trust values calculated for the specified NeoFS network participants -// at some stage of client's calculation algorithm. -// -// Any errors (local or remote, including returned status codes) are returned as Go errors, -// see [apistatus] package for NeoFS-specific error types. -// -// Context is required and must not be nil. It is used for network communication. +// SendIntermediateTrust sends global trust value calculated for the specified +// NeoFS network participant at given stage of client's iteration algorithm. // -// Return errors: -// - [ErrZeroEpoch] -// -// Parameter epoch must not be zero. -func (c *Client) AnnounceIntermediateTrust(ctx context.Context, epoch uint64, trust reputation.PeerToPeerTrust, prm PrmAnnounceIntermediateTrust) error { +// SendIntermediateTrust is used for system needs and is not intended to be +// called by regular users. 
+func (c *Client) SendIntermediateTrust(ctx context.Context, epoch uint64, iter uint32, trust reputation.PeerToPeerTrust, _ SendIntermediateTrustOptions) error { var err error - defer func() { - c.sendStatistic(stat.MethodAnnounceIntermediateTrust, err)() - }() - - if epoch == 0 { - err = ErrZeroEpoch - return err + if c.handleAPIOpResult != nil { + defer func(start time.Time) { + c.handleAPIOpResult(c.serverPubKey, c.endpoint, stat.MethodAnnounceIntermediateTrust, time.Since(start), err) + }(time.Now()) } - var v2Trust v2reputation.PeerToPeerTrust - trust.WriteToV2(&v2Trust) - - // form request body - reqBody := new(v2reputation.AnnounceIntermediateResultRequestBody) - reqBody.SetEpoch(epoch) - reqBody.SetIteration(prm.iter) - reqBody.SetTrust(&v2Trust) - // form request - var req v2reputation.AnnounceIntermediateResultRequest - - req.SetBody(reqBody) - - // init call context + req := &apireputation.AnnounceIntermediateResultRequest{ + Body: &apireputation.AnnounceIntermediateResultRequest_Body{ + Epoch: epoch, + Iteration: iter, + Trust: new(apireputation.PeerToPeerTrust), + }, + } + trust.WriteToV2(req.Body.Trust) + // FIXME: balance requests need small fixed-size buffers for encoding, its makes + // no sense to mosh them with other buffers + buf := c.signBuffers.Get().(*[]byte) + defer c.signBuffers.Put(buf) + if req.VerifyHeader, err = neofscrypto.SignRequest(c.signer, req, req.Body, *buf); err != nil { + err = fmt.Errorf("%s: %w", errSignRequest, err) // for closure above + return err + } - var ( - cc contextCall - ) + // send request + resp, err := c.transport.reputation.AnnounceIntermediateResult(ctx, req) + if err != nil { + err = fmt.Errorf("%s: %w", errTransport, err) // for closure above + return err + } - c.initCallContext(&cc) - cc.meta = prm.prmCommonMeta - cc.req = &req - cc.call = func() (responseV2, error) { - return rpcAPIAnnounceIntermediateResult(&c.c, &req, client.WithContext(ctx)) + // intercept response info + if c.interceptAPIRespInfo != 
nil { + if err = c.interceptAPIRespInfo(ResponseMetaInfo{ + key: resp.GetVerifyHeader().GetBodySignature().GetKey(), + epoch: resp.GetMetaHeader().GetEpoch(), + }); err != nil { + err = fmt.Errorf("%s: %w", errInterceptResponseInfo, err) // for closure above + return err + } } - // process call - if !cc.processCall() { - err = cc.err + // verify response integrity + if err = neofscrypto.VerifyResponse(resp, resp.Body); err != nil { + err = fmt.Errorf("%s: %w", errResponseSignature, err) // for closure above return err } - - return nil + sts, err := apistatus.ErrorFromV2(resp.GetMetaHeader().GetStatus()) + if err != nil { + err = fmt.Errorf("%s: %w", errInvalidResponseStatus, err) // for closure above + } else if sts != nil { + err = sts // for closure above + } + return err } diff --git a/client/reputation_test.go b/client/reputation_test.go new file mode 100644 index 000000000..f3065e6ec --- /dev/null +++ b/client/reputation_test.go @@ -0,0 +1,711 @@ +package client + +import ( + "bytes" + "context" + "errors" + "fmt" + "math/rand" + "net" + "testing" + "time" + + apinetmap "github.com/nspcc-dev/neofs-sdk-go/api/netmap" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + apireputation "github.com/nspcc-dev/neofs-sdk-go/api/reputation" + apisession "github.com/nspcc-dev/neofs-sdk-go/api/session" + "github.com/nspcc-dev/neofs-sdk-go/api/status" + apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" + neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" + neofscryptotest "github.com/nspcc-dev/neofs-sdk-go/crypto/test" + netmaptest "github.com/nspcc-dev/neofs-sdk-go/netmap/test" + "github.com/nspcc-dev/neofs-sdk-go/reputation" + reputationtest "github.com/nspcc-dev/neofs-sdk-go/reputation/test" + "github.com/nspcc-dev/neofs-sdk-go/stat" + versiontest "github.com/nspcc-dev/neofs-sdk-go/version/test" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/test/bufconn" +) + +type noOtherReputationCalls struct{} + +func 
(noOtherReputationCalls) AnnounceIntermediateResult(context.Context, *apireputation.AnnounceIntermediateResultRequest) (*apireputation.AnnounceIntermediateResultResponse, error) { + panic("must not be called") +} + +func (x noOtherReputationCalls) AnnounceLocalTrust(context.Context, *apireputation.AnnounceLocalTrustRequest) (*apireputation.AnnounceLocalTrustResponse, error) { + panic("must not be called") +} + +type sendLocalTrustsServer struct { + noOtherReputationCalls + // client + epoch uint64 + trusts []reputation.Trust + clientSigScheme neofscrypto.Scheme + clientPubKey []byte + // server + sleepDur time.Duration + endpointInfoOnDialServer + errTransport error + modifyResp func(*apireputation.AnnounceLocalTrustResponse) + corruptRespSig func(*apireputation.AnnounceLocalTrustResponse) +} + +func (x sendLocalTrustsServer) AnnounceLocalTrust(ctx context.Context, req *apireputation.AnnounceLocalTrustRequest) (*apireputation.AnnounceLocalTrustResponse, error) { + if x.sleepDur > 0 { + time.Sleep(x.sleepDur) + } + if x.errTransport != nil { + return nil, x.errTransport + } + var sts status.Status + resp := apireputation.AnnounceLocalTrustResponse{ + MetaHeader: &apisession.ResponseMetaHeader{Status: &sts, Epoch: x.endpointInfoOnDialServer.epoch}, + } + var err error + if ctx == nil { + sts.Code, sts.Message = status.InternalServerError, "nil context" + } else if req == nil { + sts.Code, sts.Message = status.InternalServerError, "nil request" + } else if err = neofscrypto.VerifyRequest(req, req.Body); err != nil { + sts.Code, sts.Message = status.SignatureVerificationFail, err.Error() + } else if req.VerifyHeader.BodySignature.Scheme != refs.SignatureScheme(x.clientSigScheme) || + !bytes.Equal(req.VerifyHeader.BodySignature.Key, x.clientPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request body signature credentials" + } else if req.VerifyHeader.MetaSignature.Scheme != refs.SignatureScheme(x.clientSigScheme) || + 
!bytes.Equal(req.VerifyHeader.MetaSignature.Key, x.clientPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request meta header signature credentials" + } else if req.VerifyHeader.OriginSignature.Scheme != refs.SignatureScheme(x.clientSigScheme) || + !bytes.Equal(req.VerifyHeader.OriginSignature.Key, x.clientPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected origin request verification header signature credentials" + } else if req.MetaHeader != nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: meta header is set" + } else if req.Body == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: missing body" + } else if req.Body.Epoch != x.epoch { + sts.Code, sts.Message = status.InternalServerError, "[test] invalid request: invalid body: wrong epoch" + } else if len(req.Body.Trusts) == 0 { + sts.Code, sts.Message = status.InternalServerError, "invalid request: invalid body: missing trusts" + } else { + var tr reputation.Trust + for i := range req.Body.Trusts { + if req.Body.Trusts[i] == nil { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request: invalid body: nil trust #%d", i) + } else if err = tr.ReadFromV2(req.Body.Trusts[i]); err != nil { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request: invalid body: invalid trust #%d: %v", i, err) + } else if tr != x.trusts[i] { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("[test] invalid request: invalid body: wrong trust #%d", i) + } + } + } + if x.modifyResp != nil { + x.modifyResp(&resp) + } + resp.VerifyHeader, err = neofscrypto.SignResponse(x.serverSigner, &resp, resp.Body, nil) + if err != nil { + return nil, fmt.Errorf("sign response: %w", err) + } + if x.corruptRespSig != nil { + x.corruptRespSig(&resp) + } + return &resp, nil +} + +func TestClient_SendLocalTrusts(t *testing.T) { + ctx := context.Background() + var srv 
sendLocalTrustsServer + srv.sleepDur = 10 * time.Millisecond + srv.serverSigner = neofscryptotest.RandomSigner() + srv.latestVersion = versiontest.Version() + srv.nodeInfo = netmaptest.NodeInfo() + srv.nodeInfo.SetPublicKey(neofscrypto.PublicKeyBytes(srv.serverSigner.Public())) + srv.epoch = rand.Uint64() + srv.trusts = reputationtest.NTrusts(3) + _dial := func(t testing.TB, srv *sendLocalTrustsServer, assertErr func(error), customizeOpts func(*Options)) (*Client, *bool) { + var opts Options + var handlerCalled bool + opts.SetAPIRequestResultHandler(func(nodeKey []byte, endpoint string, op stat.Method, dur time.Duration, err error) { + handlerCalled = true + require.Equal(t, srv.nodeInfo.PublicKey(), nodeKey) + require.Equal(t, "localhost:8080", endpoint) + require.Equal(t, stat.MethodAnnounceLocalTrust, op) + require.Greater(t, dur, srv.sleepDur) + assertErr(err) + }) + if customizeOpts != nil { + customizeOpts(&opts) + } + + c, err := New(anyValidURI, opts) + require.NoError(t, err) + srv.clientSigScheme = c.signer.Scheme() + srv.clientPubKey = neofscrypto.PublicKeyBytes(c.signer.Public()) + + conn := bufconn.Listen(10 << 10) + gs := grpc.NewServer() + apinetmap.RegisterNetmapServiceServer(gs, srv) + apireputation.RegisterReputationServiceServer(gs, srv) + go func() { _ = gs.Serve(conn) }() + t.Cleanup(gs.Stop) + + c.dial = func(ctx context.Context, _ string) (net.Conn, error) { return conn.DialContext(ctx) } + require.NoError(t, c.Dial(ctx)) + + return c, &handlerCalled + } + dial := func(t testing.TB, srv *sendLocalTrustsServer, assertErr func(error)) (*Client, *bool) { + return _dial(t, srv, assertErr, nil) + } + t.Run("missing trusts", func(t *testing.T) { + c, err := New(anyValidURI, Options{}) + require.NoError(t, err) + err = c.SendLocalTrusts(ctx, srv.epoch, nil, SendLocalTrustsOptions{}) + require.EqualError(t, err, "missing trusts") + err = c.SendLocalTrusts(ctx, srv.epoch, []reputation.Trust{}, SendLocalTrustsOptions{}) + require.EqualError(t, err, 
"missing trusts") + }) + t.Run("OK", func(t *testing.T) { + srv := srv + assertErr := func(err error) { require.NoError(t, err) } + c, handlerCalled := dial(t, &srv, assertErr) + err := c.SendLocalTrusts(ctx, srv.epoch, srv.trusts, SendLocalTrustsOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("fail", func(t *testing.T) { + t.Run("sign request", func(t *testing.T) { + srv := srv + srv.sleepDur = 0 + assertErr := func(err error) { require.ErrorContains(t, err, errSignRequest) } + c, handlerCalled := dial(t, &srv, assertErr) + c.signer = neofscryptotest.FailSigner(c.signer) + err := c.SendLocalTrusts(ctx, srv.epoch, srv.trusts, SendLocalTrustsOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("transport", func(t *testing.T) { + srv := srv + srv.errTransport = errors.New("any transport failure") + assertErr := func(err error) { + require.ErrorContains(t, err, errTransport) + require.ErrorContains(t, err, "any transport failure") + } + c, handlerCalled := dial(t, &srv, assertErr) + err := c.SendLocalTrusts(ctx, srv.epoch, srv.trusts, SendLocalTrustsOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("invalid response signature", func(t *testing.T) { + for i, testCase := range []struct { + err string + corrupt func(*apireputation.AnnounceLocalTrustResponse) + }{ + {err: "missing verification header", + corrupt: func(r *apireputation.AnnounceLocalTrustResponse) { r.VerifyHeader = nil }, + }, + {err: "missing body signature", + corrupt: func(r *apireputation.AnnounceLocalTrustResponse) { r.VerifyHeader.BodySignature = nil }, + }, + {err: "missing signature of the meta header", + corrupt: func(r *apireputation.AnnounceLocalTrustResponse) { r.VerifyHeader.MetaSignature = nil }, + }, + {err: "missing signature of the origin verification header", + corrupt: func(r *apireputation.AnnounceLocalTrustResponse) { r.VerifyHeader.OriginSignature = nil }, + }, + {err: "verify body signature: missing public 
key", + corrupt: func(r *apireputation.AnnounceLocalTrustResponse) { r.VerifyHeader.BodySignature.Key = nil }, + }, + {err: "verify signature of the meta header: missing public key", + corrupt: func(r *apireputation.AnnounceLocalTrustResponse) { r.VerifyHeader.MetaSignature.Key = nil }, + }, + {err: "verify signature of the origin verification header: missing public key", + corrupt: func(r *apireputation.AnnounceLocalTrustResponse) { r.VerifyHeader.OriginSignature.Key = nil }, + }, + {err: "verify body signature: decode public key from binary", + corrupt: func(r *apireputation.AnnounceLocalTrustResponse) { + r.VerifyHeader.BodySignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the meta header: decode public key from binary", + corrupt: func(r *apireputation.AnnounceLocalTrustResponse) { + r.VerifyHeader.MetaSignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the origin verification header: decode public key from binary", + corrupt: func(r *apireputation.AnnounceLocalTrustResponse) { + r.VerifyHeader.OriginSignature.Key = []byte("not a public key") + }, + }, + {err: "verify body signature: invalid scheme -1", + corrupt: func(r *apireputation.AnnounceLocalTrustResponse) { r.VerifyHeader.BodySignature.Scheme = -1 }, + }, + {err: "verify body signature: unsupported scheme 3", + corrupt: func(r *apireputation.AnnounceLocalTrustResponse) { r.VerifyHeader.BodySignature.Scheme = 3 }, + }, + {err: "verify signature of the meta header: unsupported scheme 3", + corrupt: func(r *apireputation.AnnounceLocalTrustResponse) { r.VerifyHeader.MetaSignature.Scheme = 3 }, + }, + {err: "verify signature of the origin verification header: unsupported scheme 3", + corrupt: func(r *apireputation.AnnounceLocalTrustResponse) { r.VerifyHeader.OriginSignature.Scheme = 3 }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apireputation.AnnounceLocalTrustResponse) { r.VerifyHeader.BodySignature.Sign[0]++ }, 
+ }, + {err: "verify signature of the meta header: signature mismatch", + corrupt: func(r *apireputation.AnnounceLocalTrustResponse) { r.VerifyHeader.MetaSignature.Sign[0]++ }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apireputation.AnnounceLocalTrustResponse) { r.VerifyHeader.OriginSignature.Sign[0]++ }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apireputation.AnnounceLocalTrustResponse) { + r.VerifyHeader.BodySignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the meta header: signature mismatch", + corrupt: func(r *apireputation.AnnounceLocalTrustResponse) { + r.VerifyHeader.MetaSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apireputation.AnnounceLocalTrustResponse) { + r.VerifyHeader.OriginSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + } { + srv := srv + srv.corruptRespSig = testCase.corrupt + assertErr := func(err error) { + require.ErrorContains(t, err, errResponseSignature, [2]any{i, testCase}) + require.ErrorContains(t, err, testCase.err, [2]any{i, testCase}) + } + c, handlerCalled := dial(t, &srv, assertErr) + err := c.SendLocalTrusts(ctx, srv.epoch, srv.trusts, SendLocalTrustsOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + } + }) + t.Run("invalid response status", func(t *testing.T) { + srv := srv + srv.modifyResp = func(r *apireputation.AnnounceLocalTrustResponse) { + r.MetaHeader.Status = &status.Status{Code: status.InternalServerError, Details: make([]*status.Status_Detail, 1)} + } + assertErr := func(err error) { + require.ErrorContains(t, err, errInvalidResponseStatus) + require.ErrorContains(t, err, "details attached but not supported") + } + c, handlerCalled := dial(t, 
&srv, assertErr) + err := c.SendLocalTrusts(ctx, srv.epoch, srv.trusts, SendLocalTrustsOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("status errors", func(t *testing.T) { + for _, testCase := range []struct { + code uint32 + errConst error + errVar any + }{ + {code: 1 << 32 / 2}, + {code: status.InternalServerError, errConst: apistatus.ErrServerInternal, errVar: new(apistatus.InternalServerError)}, + {code: status.SignatureVerificationFail, errConst: apistatus.ErrSignatureVerification, errVar: new(apistatus.SignatureVerificationFailure)}, + } { + srv := srv + srv.modifyResp = func(r *apireputation.AnnounceLocalTrustResponse) { + r.MetaHeader.Status = &status.Status{Code: testCase.code, Message: "any message"} + } + assertErr := func(err error) { + require.ErrorIs(t, err, apistatus.Error, testCase) + require.ErrorContains(t, err, "any message", testCase) + if testCase.errConst != nil { + require.ErrorIs(t, err, testCase.errConst, testCase) + } + if testCase.errVar != nil { + require.ErrorAs(t, err, testCase.errVar, testCase) + } + } + c, handlerCalled := dial(t, &srv, assertErr) + err := c.SendLocalTrusts(ctx, srv.epoch, srv.trusts, SendLocalTrustsOptions{}) + assertErr(err) + require.True(t, *handlerCalled, testCase) + } + }) + }) + t.Run("response info handler", func(t *testing.T) { + t.Run("OK", func(t *testing.T) { + srv := srv + srv.endpointInfoOnDialServer.epoch = 3598503 + assertErr := func(err error) { require.NoError(t, err) } + respHandlerCalled := false + c, reqHandlerCalled := _dial(t, &srv, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + respHandlerCalled = true + require.EqualValues(t, 3598503, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return nil + }) + }) + err := c.SendLocalTrusts(ctx, srv.epoch, srv.trusts, SendLocalTrustsOptions{}) + assertErr(err) + require.True(t, respHandlerCalled) + 
require.True(t, *reqHandlerCalled) + }) + t.Run("fail", func(t *testing.T) { + srv := srv + srv.endpointInfoOnDialServer.epoch = 4386380643 + assertErr := func(err error) { require.ErrorContains(t, err, "intercept response info: some handler error") } + respHandlerCalled := false + c, reqHandlerCalled := _dial(t, &srv, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + if !respHandlerCalled { // dial + respHandlerCalled = true + return nil + } + require.EqualValues(t, 4386380643, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return errors.New("some handler error") + }) + }) + err := c.SendLocalTrusts(ctx, srv.epoch, srv.trusts, SendLocalTrustsOptions{}) + assertErr(err) + require.True(t, respHandlerCalled) + require.True(t, *reqHandlerCalled) + }) + }) +} + +type sendIntermediateTrustServer struct { + noOtherReputationCalls + // client + epoch uint64 + iter uint32 + trust reputation.PeerToPeerTrust + clientSigScheme neofscrypto.Scheme + clientPubKey []byte + // server + sleepDur time.Duration + endpointInfoOnDialServer + errTransport error + modifyResp func(*apireputation.AnnounceIntermediateResultResponse) + corruptRespSig func(*apireputation.AnnounceIntermediateResultResponse) +} + +func (x sendIntermediateTrustServer) AnnounceIntermediateResult(ctx context.Context, req *apireputation.AnnounceIntermediateResultRequest) (*apireputation.AnnounceIntermediateResultResponse, error) { + if x.sleepDur > 0 { + time.Sleep(x.sleepDur) + } + if x.errTransport != nil { + return nil, x.errTransport + } + var sts status.Status + resp := apireputation.AnnounceIntermediateResultResponse{ + MetaHeader: &apisession.ResponseMetaHeader{Status: &sts, Epoch: x.endpointInfoOnDialServer.epoch}, + } + var err error + var tr reputation.PeerToPeerTrust + if ctx == nil { + sts.Code, sts.Message = status.InternalServerError, "nil context" + } else if req == nil { + 
sts.Code, sts.Message = status.InternalServerError, "nil request" + } else if err = neofscrypto.VerifyRequest(req, req.Body); err != nil { + sts.Code, sts.Message = status.SignatureVerificationFail, err.Error() + } else if req.VerifyHeader.BodySignature.Scheme != refs.SignatureScheme(x.clientSigScheme) || + !bytes.Equal(req.VerifyHeader.BodySignature.Key, x.clientPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request body signature credentials" + } else if req.VerifyHeader.MetaSignature.Scheme != refs.SignatureScheme(x.clientSigScheme) || + !bytes.Equal(req.VerifyHeader.MetaSignature.Key, x.clientPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request meta header signature credentials" + } else if req.VerifyHeader.OriginSignature.Scheme != refs.SignatureScheme(x.clientSigScheme) || + !bytes.Equal(req.VerifyHeader.OriginSignature.Key, x.clientPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected origin request verification header signature credentials" + } else if req.MetaHeader != nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: meta header is set" + } else if req.Body == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: missing body" + } else if req.Body.Epoch != x.epoch { + sts.Code, sts.Message = status.InternalServerError, "[test] invalid request: invalid body: wrong epoch" + } else if req.Body.Iteration != x.iter { + sts.Code, sts.Message = status.InternalServerError, "[test] invalid request: invalid body: wrong iteration" + } else if req.Body.Trust == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: invalid body: missing trust" + } else if err = tr.ReadFromV2(req.Body.Trust); err != nil { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("[test] invalid request: invalid body: invalid trust: %v", err) + } else if tr != x.trust { + sts.Code, sts.Message = 
status.InternalServerError, "[test] invalid request: invalid body: wrong trust" + } + if x.modifyResp != nil { + x.modifyResp(&resp) + } + resp.VerifyHeader, err = neofscrypto.SignResponse(x.serverSigner, &resp, resp.Body, nil) + if err != nil { + return nil, fmt.Errorf("sign response: %w", err) + } + if x.corruptRespSig != nil { + x.corruptRespSig(&resp) + } + return &resp, nil +} + +func TestClient_SendIntermediateTrust(t *testing.T) { + ctx := context.Background() + var srv sendIntermediateTrustServer + srv.sleepDur = 10 * time.Millisecond + srv.serverSigner = neofscryptotest.RandomSigner() + srv.latestVersion = versiontest.Version() + srv.nodeInfo = netmaptest.NodeInfo() + srv.nodeInfo.SetPublicKey(neofscrypto.PublicKeyBytes(srv.serverSigner.Public())) + srv.epoch = rand.Uint64() + srv.iter = rand.Uint32() + srv.trust = reputationtest.PeerToPeerTrust() + _dial := func(t testing.TB, srv *sendIntermediateTrustServer, assertErr func(error), customizeOpts func(*Options)) (*Client, *bool) { + var opts Options + var handlerCalled bool + opts.SetAPIRequestResultHandler(func(nodeKey []byte, endpoint string, op stat.Method, dur time.Duration, err error) { + handlerCalled = true + require.Equal(t, srv.nodeInfo.PublicKey(), nodeKey) + require.Equal(t, "localhost:8080", endpoint) + require.Equal(t, stat.MethodAnnounceIntermediateTrust, op) + require.Greater(t, dur, srv.sleepDur) + assertErr(err) + }) + if customizeOpts != nil { + customizeOpts(&opts) + } + + c, err := New(anyValidURI, opts) + require.NoError(t, err) + srv.clientSigScheme = c.signer.Scheme() + srv.clientPubKey = neofscrypto.PublicKeyBytes(c.signer.Public()) + + conn := bufconn.Listen(10 << 10) + gs := grpc.NewServer() + apinetmap.RegisterNetmapServiceServer(gs, srv) + apireputation.RegisterReputationServiceServer(gs, srv) + go func() { _ = gs.Serve(conn) }() + t.Cleanup(gs.Stop) + + c.dial = func(ctx context.Context, _ string) (net.Conn, error) { return conn.DialContext(ctx) } + require.NoError(t, 
c.Dial(ctx)) + + return c, &handlerCalled + } + dial := func(t testing.TB, srv *sendIntermediateTrustServer, assertErr func(error)) (*Client, *bool) { + return _dial(t, srv, assertErr, nil) + } + t.Run("missing trusts", func(t *testing.T) { + c, err := New(anyValidURI, Options{}) + require.NoError(t, err) + err = c.SendLocalTrusts(ctx, srv.epoch, nil, SendLocalTrustsOptions{}) + require.EqualError(t, err, "missing trusts") + err = c.SendLocalTrusts(ctx, srv.epoch, []reputation.Trust{}, SendLocalTrustsOptions{}) + require.EqualError(t, err, "missing trusts") + }) + t.Run("OK", func(t *testing.T) { + srv := srv + assertErr := func(err error) { require.NoError(t, err) } + c, handlerCalled := dial(t, &srv, assertErr) + err := c.SendIntermediateTrust(ctx, srv.epoch, srv.iter, srv.trust, SendIntermediateTrustOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("fail", func(t *testing.T) { + t.Run("sign request", func(t *testing.T) { + srv := srv + srv.sleepDur = 0 + assertErr := func(err error) { require.ErrorContains(t, err, errSignRequest) } + c, handlerCalled := dial(t, &srv, assertErr) + c.signer = neofscryptotest.FailSigner(c.signer) + err := c.SendIntermediateTrust(ctx, srv.epoch, srv.iter, srv.trust, SendIntermediateTrustOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("transport", func(t *testing.T) { + srv := srv + srv.errTransport = errors.New("any transport failure") + assertErr := func(err error) { + require.ErrorContains(t, err, errTransport) + require.ErrorContains(t, err, "any transport failure") + } + c, handlerCalled := dial(t, &srv, assertErr) + err := c.SendIntermediateTrust(ctx, srv.epoch, srv.iter, srv.trust, SendIntermediateTrustOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("invalid response signature", func(t *testing.T) { + for i, testCase := range []struct { + err string + corrupt func(*apireputation.AnnounceIntermediateResultResponse) + }{ + {err: "missing verification 
header", + corrupt: func(r *apireputation.AnnounceIntermediateResultResponse) { r.VerifyHeader = nil }, + }, + {err: "missing body signature", + corrupt: func(r *apireputation.AnnounceIntermediateResultResponse) { r.VerifyHeader.BodySignature = nil }, + }, + {err: "missing signature of the meta header", + corrupt: func(r *apireputation.AnnounceIntermediateResultResponse) { r.VerifyHeader.MetaSignature = nil }, + }, + {err: "missing signature of the origin verification header", + corrupt: func(r *apireputation.AnnounceIntermediateResultResponse) { r.VerifyHeader.OriginSignature = nil }, + }, + {err: "verify body signature: missing public key", + corrupt: func(r *apireputation.AnnounceIntermediateResultResponse) { r.VerifyHeader.BodySignature.Key = nil }, + }, + {err: "verify signature of the meta header: missing public key", + corrupt: func(r *apireputation.AnnounceIntermediateResultResponse) { r.VerifyHeader.MetaSignature.Key = nil }, + }, + {err: "verify signature of the origin verification header: missing public key", + corrupt: func(r *apireputation.AnnounceIntermediateResultResponse) { r.VerifyHeader.OriginSignature.Key = nil }, + }, + {err: "verify body signature: decode public key from binary", + corrupt: func(r *apireputation.AnnounceIntermediateResultResponse) { + r.VerifyHeader.BodySignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the meta header: decode public key from binary", + corrupt: func(r *apireputation.AnnounceIntermediateResultResponse) { + r.VerifyHeader.MetaSignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the origin verification header: decode public key from binary", + corrupt: func(r *apireputation.AnnounceIntermediateResultResponse) { + r.VerifyHeader.OriginSignature.Key = []byte("not a public key") + }, + }, + {err: "verify body signature: invalid scheme -1", + corrupt: func(r *apireputation.AnnounceIntermediateResultResponse) { r.VerifyHeader.BodySignature.Scheme = -1 }, + }, 
+ {err: "verify body signature: unsupported scheme 3", + corrupt: func(r *apireputation.AnnounceIntermediateResultResponse) { r.VerifyHeader.BodySignature.Scheme = 3 }, + }, + {err: "verify signature of the meta header: unsupported scheme 3", + corrupt: func(r *apireputation.AnnounceIntermediateResultResponse) { r.VerifyHeader.MetaSignature.Scheme = 3 }, + }, + {err: "verify signature of the origin verification header: unsupported scheme 3", + corrupt: func(r *apireputation.AnnounceIntermediateResultResponse) { r.VerifyHeader.OriginSignature.Scheme = 3 }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apireputation.AnnounceIntermediateResultResponse) { r.VerifyHeader.BodySignature.Sign[0]++ }, + }, + {err: "verify signature of the meta header: signature mismatch", + corrupt: func(r *apireputation.AnnounceIntermediateResultResponse) { r.VerifyHeader.MetaSignature.Sign[0]++ }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apireputation.AnnounceIntermediateResultResponse) { r.VerifyHeader.OriginSignature.Sign[0]++ }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apireputation.AnnounceIntermediateResultResponse) { + r.VerifyHeader.BodySignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the meta header: signature mismatch", + corrupt: func(r *apireputation.AnnounceIntermediateResultResponse) { + r.VerifyHeader.MetaSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apireputation.AnnounceIntermediateResultResponse) { + r.VerifyHeader.OriginSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + } { + srv := srv + srv.corruptRespSig = testCase.corrupt + assertErr := func(err error) { + 
require.ErrorContains(t, err, errResponseSignature, [2]any{i, testCase}) + require.ErrorContains(t, err, testCase.err, [2]any{i, testCase}) + } + c, handlerCalled := dial(t, &srv, assertErr) + err := c.SendIntermediateTrust(ctx, srv.epoch, srv.iter, srv.trust, SendIntermediateTrustOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + } + }) + t.Run("invalid response status", func(t *testing.T) { + srv := srv + srv.modifyResp = func(r *apireputation.AnnounceIntermediateResultResponse) { + r.MetaHeader.Status = &status.Status{Code: status.InternalServerError, Details: make([]*status.Status_Detail, 1)} + } + assertErr := func(err error) { + require.ErrorContains(t, err, errInvalidResponseStatus) + require.ErrorContains(t, err, "details attached but not supported") + } + c, handlerCalled := dial(t, &srv, assertErr) + err := c.SendIntermediateTrust(ctx, srv.epoch, srv.iter, srv.trust, SendIntermediateTrustOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("status errors", func(t *testing.T) { + for _, testCase := range []struct { + code uint32 + errConst error + errVar any + }{ + {code: 1 << 32 / 2}, + {code: status.InternalServerError, errConst: apistatus.ErrServerInternal, errVar: new(apistatus.InternalServerError)}, + {code: status.SignatureVerificationFail, errConst: apistatus.ErrSignatureVerification, errVar: new(apistatus.SignatureVerificationFailure)}, + } { + srv := srv + srv.modifyResp = func(r *apireputation.AnnounceIntermediateResultResponse) { + r.MetaHeader.Status = &status.Status{Code: testCase.code, Message: "any message"} + } + assertErr := func(err error) { + require.ErrorIs(t, err, apistatus.Error, testCase) + require.ErrorContains(t, err, "any message", testCase) + if testCase.errConst != nil { + require.ErrorIs(t, err, testCase.errConst, testCase) + } + if testCase.errVar != nil { + require.ErrorAs(t, err, testCase.errVar, testCase) + } + } + c, handlerCalled := dial(t, &srv, assertErr) + err := 
c.SendIntermediateTrust(ctx, srv.epoch, srv.iter, srv.trust, SendIntermediateTrustOptions{}) + assertErr(err) + require.True(t, *handlerCalled, testCase) + } + }) + }) + t.Run("response info handler", func(t *testing.T) { + t.Run("OK", func(t *testing.T) { + srv := srv + srv.endpointInfoOnDialServer.epoch = 3598503 + assertErr := func(err error) { require.NoError(t, err) } + respHandlerCalled := false + c, reqHandlerCalled := _dial(t, &srv, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + respHandlerCalled = true + require.EqualValues(t, 3598503, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return nil + }) + }) + err := c.SendIntermediateTrust(ctx, srv.epoch, srv.iter, srv.trust, SendIntermediateTrustOptions{}) + assertErr(err) + require.True(t, respHandlerCalled) + require.True(t, *reqHandlerCalled) + }) + t.Run("fail", func(t *testing.T) { + srv := srv + srv.endpointInfoOnDialServer.epoch = 4386380643 + assertErr := func(err error) { require.ErrorContains(t, err, "intercept response info: some handler error") } + respHandlerCalled := false + c, reqHandlerCalled := _dial(t, &srv, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + if !respHandlerCalled { // dial + respHandlerCalled = true + return nil + } + require.EqualValues(t, 4386380643, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return errors.New("some handler error") + }) + }) + err := c.SendIntermediateTrust(ctx, srv.epoch, srv.iter, srv.trust, SendIntermediateTrustOptions{}) + assertErr(err) + require.True(t, respHandlerCalled) + require.True(t, *reqHandlerCalled) + }) + }) +} diff --git a/client/response.go b/client/response.go index 934864ebd..f809e49ef 100644 --- a/client/response.go +++ b/client/response.go @@ -1,7 +1,5 @@ package client -import 
"github.com/nspcc-dev/neofs-api-go/v2/session" - // ResponseMetaInfo groups meta information about any NeoFS API response. type ResponseMetaInfo struct { key []byte @@ -9,11 +7,6 @@ type ResponseMetaInfo struct { epoch uint64 } -type responseV2 interface { - GetMetaHeader() *session.ResponseMetaHeader - GetVerificationHeader() *session.ResponseVerificationHeader -} - // ResponderKey returns responder's public key in a binary format. // // The resulting slice of bytes is a serialized compressed public key. See [elliptic.MarshalCompressed]. diff --git a/client/session.go b/client/session.go index d13f36914..c2403349c 100644 --- a/client/session.go +++ b/client/session.go @@ -2,143 +2,128 @@ package client import ( "context" - - "github.com/nspcc-dev/neofs-api-go/v2/refs" - "github.com/nspcc-dev/neofs-api-go/v2/rpc/client" - v2session "github.com/nspcc-dev/neofs-api-go/v2/session" + "errors" + "fmt" + "time" + + "github.com/google/uuid" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + apisession "github.com/nspcc-dev/neofs-sdk-go/api/session" + apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" + neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" + neofsecdsa "github.com/nspcc-dev/neofs-sdk-go/crypto/ecdsa" "github.com/nspcc-dev/neofs-sdk-go/stat" "github.com/nspcc-dev/neofs-sdk-go/user" ) -// PrmSessionCreate groups parameters of SessionCreate operation. -type PrmSessionCreate struct { - prmCommonMeta - - exp uint64 -} +// StartSessionOptions groups optional parameters of [Client.StartSession]. +type StartSessionOptions struct{} -// SetExp sets number of the last NepFS epoch in the lifetime of the session after which it will be expired. -func (x *PrmSessionCreate) SetExp(exp uint64) { - x.exp = exp +// SessionData is a result of [Client.StartSession]. +type SessionData struct { + // Unique identifier of the session. + ID uuid.UUID + // Public session key authenticating the subject. 
+ PublicKey neofscrypto.PublicKey } -// ResSessionCreate groups resulting values of SessionCreate operation. -type ResSessionCreate struct { - id []byte - - sessionKey []byte -} - -// NewResSessionCreate is a constructor for NewResSessionCreate. -func NewResSessionCreate(id []byte, sessionKey []byte) ResSessionCreate { - return ResSessionCreate{ - id: id, - sessionKey: sessionKey, +// StartSession opens a session between given user and the node server on the +// remote endpoint expiring after the specified epoch. The session lifetime +// coincides with the server lifetime. Resulting SessionData is used to complete +// a session token issued by provided signer. Once session is started, remote +// server becomes an issuer's trusted party and session token represents a power +// of attorney. Complete session token can be used in some operations performed +// on behalf of the issuer but performed by the server. Now it's just simplified +// creation and deletion of objects ([Client.PutObject] and +// [Client.DeleteObject] respectively). +func (c *Client) StartSession(ctx context.Context, issuer user.Signer, exp uint64, _ StartSessionOptions) (SessionData, error) { + var res SessionData + if issuer == nil { + return res, errMissingSigner } -} - -func (x *ResSessionCreate) setID(id []byte) { - x.id = id -} -// ID returns identifier of the opened session in a binary NeoFS API protocol format. -// -// Client doesn't retain value so modification is safe. -func (x ResSessionCreate) ID() []byte { - return x.id -} - -func (x *ResSessionCreate) setSessionKey(key []byte) { - x.sessionKey = key -} - -// PublicKey returns public key of the opened session in a binary NeoFS API protocol format. -// -// The resulting slice of bytes is a serialized compressed public key. See [elliptic.MarshalCompressed]. -// Use [neofsecdsa.PublicKey.Decode] to decode it into a type-specific structure. 
-// -// The value returned shares memory with the structure itself, so changing it can lead to data corruption. -// Make a copy if you need to change it. -func (x ResSessionCreate) PublicKey() []byte { - return x.sessionKey -} - -// SessionCreate opens a session with the node server on the remote endpoint. -// The session lifetime coincides with the server lifetime. Results can be written -// to session token which can be later attached to the requests. -// -// Any errors (local or remote, including returned status codes) are returned as Go errors, -// see [apistatus] package for NeoFS-specific error types. -// -// Context is required and must not be nil. It is used for network communication. -// -// Signer is required and must not be nil. The account will be used as owner of new session. -// -// Return errors: -// - [ErrMissingSigner] -func (c *Client) SessionCreate(ctx context.Context, signer user.Signer, prm PrmSessionCreate) (*ResSessionCreate, error) { var err error - defer func() { - c.sendStatistic(stat.MethodSessionCreate, err)() - }() - - if signer == nil { - return nil, ErrMissingSigner + if c.handleAPIOpResult != nil { + defer func(start time.Time) { + c.handleAPIOpResult(c.serverPubKey, c.endpoint, stat.MethodSessionCreate, time.Since(start), err) + }(time.Now()) } - ownerID := signer.UserID() - - var ownerIDV2 refs.OwnerID - ownerID.WriteToV2(&ownerIDV2) - - // form request body - reqBody := new(v2session.CreateRequestBody) - reqBody.SetOwnerID(&ownerIDV2) - reqBody.SetExpiration(prm.exp) - - // for request - var req v2session.CreateRequest - - req.SetBody(reqBody) - - // init call context - - var ( - cc contextCall - res ResSessionCreate - ) - - c.initCallContext(&cc) - cc.signer = signer - cc.meta = prm.prmCommonMeta - cc.req = &req - cc.call = func() (responseV2, error) { - return c.server.createSession(&c.c, &req, client.WithContext(ctx)) + // form request + req := &apisession.CreateRequest{ + Body: &apisession.CreateRequest_Body{ + OwnerId: 
new(refs.OwnerID),
+			Expiration: exp,
+		},
+	}
+	issuer.UserID().WriteToV2(req.Body.OwnerId)
+	// FIXME: balance requests need small fixed-size buffers for encoding, it makes
+	// no sense to mash them with other buffers
+	buf := c.signBuffers.Get().(*[]byte)
+	defer c.signBuffers.Put(buf)
+	if req.VerifyHeader, err = neofscrypto.SignRequest(issuer, req, req.Body, *buf); err != nil {
+		err = fmt.Errorf("%s: %w", errSignRequest, err) // for closure above
+		return res, err
 	}
-	cc.result = func(r responseV2) {
-		resp := r.(*v2session.CreateResponse)
-
-		body := resp.GetBody()
-		if len(body.GetID()) == 0 {
-			cc.err = newErrMissingResponseField("session id")
-			return
-		}
+	// send request
+	resp, err := c.transport.session.Create(ctx, req)
+	if err != nil {
+		err = fmt.Errorf("%s: %w", errTransport, err) // for closure above
+		return res, err
+	}

-		if len(body.GetSessionKey()) == 0 {
-			cc.err = newErrMissingResponseField("session key")
-			return
+	// intercept response info
+	if c.interceptAPIRespInfo != nil {
+		if err = c.interceptAPIRespInfo(ResponseMetaInfo{
+			key:   resp.GetVerifyHeader().GetBodySignature().GetKey(),
+			epoch: resp.GetMetaHeader().GetEpoch(),
+		}); err != nil {
+			err = fmt.Errorf("%s: %w", errInterceptResponseInfo, err) // for closure above
+			return res, err
 		}
-
-		res.setID(body.GetID())
-		res.setSessionKey(body.GetSessionKey())
 	}

-	// process call
-	if !cc.processCall() {
-		err = cc.err
-		return nil, cc.err
+	// verify response integrity
+	if err = neofscrypto.VerifyResponse(resp, resp.Body); err != nil {
+		err = fmt.Errorf("%s: %w", errResponseSignature, err) // for closure above
+		return res, err
+	}
+	sts, err := apistatus.ErrorFromV2(resp.GetMetaHeader().GetStatus())
+	if err != nil {
+		err = fmt.Errorf("%s: %w", errInvalidResponseStatus, err) // for closure above
+		return res, err
+	}
+	if sts != nil {
+		err = sts // for closure above
+		return res, err
 	}

-	return &res, nil
+	// decode response payload
+	if resp.Body == nil {
+		err = 
errors.New(errMissingResponseBody) // for closure above + return res, err + } + const fieldID = "ID" + if len(resp.Body.Id) == 0 { + err = fmt.Errorf("%s (%s)", errMissingResponseBodyField, fieldID) // for closure above + return res, err + } else if err = res.ID.UnmarshalBinary(resp.Body.Id); err != nil { + err = fmt.Errorf("%s (%s): %w", errInvalidResponseBodyField, fieldID, err) // for closure above + return res, err + } else if v := res.ID.Version(); v != 4 { + err = fmt.Errorf("%s (%s): wrong UUID version %d", errInvalidResponseBodyField, fieldID, v) // for closure above + return res, err + } + const fieldPubKey = "public session key" + if len(resp.Body.SessionKey) == 0 { + err = fmt.Errorf("%s (%s)", errMissingResponseBodyField, fieldPubKey) // for closure above + return res, err + } + res.PublicKey = new(neofsecdsa.PublicKey) + if err = res.PublicKey.Decode(resp.Body.SessionKey); err != nil { + err = fmt.Errorf("%s (%s): %w", errInvalidResponseBodyField, fieldPubKey, err) // for closure above + return res, err + } + return res, nil } diff --git a/client/session_container.go b/client/session_container.go index df8773d5b..e57120d18 100644 --- a/client/session_container.go +++ b/client/session_container.go @@ -1,59 +1,54 @@ package client -import ( - v2session "github.com/nspcc-dev/neofs-api-go/v2/session" - "github.com/nspcc-dev/neofs-sdk-go/session" -) - -// sessionContainer is a special type which unifies session logic management for client parameters. -// All methods make public, because sessionContainer is included in Prm* structs. -type sessionContainer struct { - isSessionIgnored bool - meta v2session.RequestMetaHeader -} - -// GetSession returns session object. +// // sessionContainer is a special type which unifies session logic management for client parameters. +// // All methods make public, because sessionContainer is included in Prm* structs. 
+// type sessionContainer struct { +// isSessionIgnored bool +// meta v2session.RequestMetaHeader +// } // -// Returns: -// - [ErrNoSession] err if session wasn't set. -// - [ErrNoSessionExplicitly] if IgnoreSession was used. -func (x *sessionContainer) GetSession() (*session.Object, error) { - if x.isSessionIgnored { - return nil, ErrNoSessionExplicitly - } - - token := x.meta.GetSessionToken() - if token == nil { - return nil, ErrNoSession - } - - var sess session.Object - if err := sess.ReadFromV2(*token); err != nil { - return nil, err - } - - return &sess, nil -} - -// WithinSession specifies session within which the query must be executed. +// // GetSession returns session object. +// // +// // Returns: +// // - [ErrNoSession] err if session wasn't set. +// // - [ErrNoSessionExplicitly] if IgnoreSession was used. +// func (x *sessionContainer) GetSession() (*session.Object, error) { +// if x.isSessionIgnored { +// return nil, ErrNoSessionExplicitly +// } // -// Creator of the session acquires the authorship of the request. -// This may affect the execution of an operation (e.g. access control). +// token := x.meta.GetSessionToken() +// if token == nil { +// return nil, ErrNoSession +// } // -// See also IgnoreSession. +// var sess session.Object +// if err := sess.ReadFromV2(*token); err != nil { +// return nil, err +// } // -// Must be signed. -func (x *sessionContainer) WithinSession(t session.Object) { - var tokv2 v2session.Token - t.WriteToV2(&tokv2) - x.meta.SetSessionToken(&tokv2) - x.isSessionIgnored = false -} - -// IgnoreSession disables auto-session creation. +// return &sess, nil +// } +// +// // WithinSession specifies session within which the query must be executed. +// // +// // Creator of the session acquires the authorship of the request. +// // This may affect the execution of an operation (e.g. access control). +// // +// // See also IgnoreSession. +// // +// // Must be signed. 
+// func (x *sessionContainer) WithinSession(t session.Object) { +// var tokv2 v2session.Token +// t.WriteToV2(&tokv2) +// x.meta.SetSessionToken(&tokv2) +// x.isSessionIgnored = false +// } // -// See also WithinSession. -func (x *sessionContainer) IgnoreSession() { - x.isSessionIgnored = true - x.meta.SetSessionToken(nil) -} +// // IgnoreSession disables auto-session creation. +// // +// // See also WithinSession. +// func (x *sessionContainer) IgnoreSession() { +// x.isSessionIgnored = true +// x.meta.SetSessionToken(nil) +// } diff --git a/client/session_test.go b/client/session_test.go index 9b92ab473..8e13d0c39 100644 --- a/client/session_test.go +++ b/client/session_test.go @@ -1,68 +1,449 @@ package client import ( + "bytes" "context" + "errors" + "fmt" + "math/rand" + "net" "testing" + "time" - v2netmap "github.com/nspcc-dev/neofs-api-go/v2/netmap" - "github.com/nspcc-dev/neofs-api-go/v2/rpc/client" - "github.com/nspcc-dev/neofs-api-go/v2/session" + "github.com/google/uuid" + apinetmap "github.com/nspcc-dev/neofs-sdk-go/api/netmap" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + apisession "github.com/nspcc-dev/neofs-sdk-go/api/session" + "github.com/nspcc-dev/neofs-sdk-go/api/status" + apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" - "github.com/nspcc-dev/neofs-sdk-go/crypto/test" + neofsecdsa "github.com/nspcc-dev/neofs-sdk-go/crypto/ecdsa" + neofscryptotest "github.com/nspcc-dev/neofs-sdk-go/crypto/test" + netmaptest "github.com/nspcc-dev/neofs-sdk-go/netmap/test" + "github.com/nspcc-dev/neofs-sdk-go/stat" + "github.com/nspcc-dev/neofs-sdk-go/user" + usertest "github.com/nspcc-dev/neofs-sdk-go/user/test" + versiontest "github.com/nspcc-dev/neofs-sdk-go/version/test" "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/test/bufconn" ) -type sessionAPIServer struct { - signer neofscrypto.Signer - setBody func(body *session.CreateResponseBody) -} 
+type noOtherSessionCalls struct{} -func (m sessionAPIServer) netMapSnapshot(context.Context, v2netmap.SnapshotRequest) (*v2netmap.SnapshotResponse, error) { - return nil, nil +func (noOtherSessionCalls) Create(context.Context, *apisession.CreateRequest) (*apisession.CreateResponse, error) { + panic("must not be called") } -func (m sessionAPIServer) createSession(*client.Client, *session.CreateRequest, ...client.CallOption) (*session.CreateResponse, error) { - var body session.CreateResponseBody - m.setBody(&body) - - var resp session.CreateResponse - resp.SetBody(&body) +type startSessionServer struct { + noOtherSessionCalls + // client + issuer user.Signer + exp uint64 + // server + sleepDur time.Duration + endpointInfoOnDialServer + id uuid.UUID + sessionPubKey neofscrypto.PublicKey + errTransport error + modifyResp func(*apisession.CreateResponse) + corruptRespSig func(*apisession.CreateResponse) +} - if err := signServiceMessage(m.signer, &resp, nil); err != nil { - return nil, err +func (x startSessionServer) Create(ctx context.Context, req *apisession.CreateRequest) (*apisession.CreateResponse, error) { + if x.sleepDur > 0 { + time.Sleep(x.sleepDur) + } + if x.errTransport != nil { + return nil, x.errTransport + } + var sts status.Status + resp := apisession.CreateResponse{ + MetaHeader: &apisession.ResponseMetaHeader{Status: &sts, Epoch: x.epoch}, + } + var err error + var usr user.ID + sigScheme := refs.SignatureScheme(x.issuer.Scheme()) + creatorPubKey := neofscrypto.PublicKeyBytes(x.issuer.Public()) + if ctx == nil { + sts.Code, sts.Message = status.InternalServerError, "nil context" + } else if req == nil { + sts.Code, sts.Message = status.InternalServerError, "nil request" + } else if err = neofscrypto.VerifyRequest(req, req.Body); err != nil { + sts.Code, sts.Message = status.SignatureVerificationFail, err.Error() + } else if req.VerifyHeader.BodySignature.Scheme != sigScheme || + !bytes.Equal(req.VerifyHeader.BodySignature.Key, creatorPubKey) { + 
sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request body signature credentials" + } else if req.VerifyHeader.MetaSignature.Scheme != sigScheme || + !bytes.Equal(req.VerifyHeader.MetaSignature.Key, creatorPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected request meta header signature credentials" + } else if req.VerifyHeader.OriginSignature.Scheme != sigScheme || + !bytes.Equal(req.VerifyHeader.OriginSignature.Key, creatorPubKey) { + sts.Code, sts.Message = status.InternalServerError, "[test] unexpected origin request verification header signature credentials" + } else if req.MetaHeader != nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: meta header is set" + } else if req.Body == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: missing body" + } else if req.Body.Expiration != x.exp { + sts.Code, sts.Message = status.InternalServerError, "[test] wrong expiration timestamp" + } else if req.Body.OwnerId == nil { + sts.Code, sts.Message = status.InternalServerError, "invalid request: invalid body: missing issuer" + } else if err = usr.ReadFromV2(req.Body.OwnerId); err != nil { + sts.Code, sts.Message = status.InternalServerError, fmt.Sprintf("invalid request: invalid body: invalid issuer: %s", err) + } else if usr != x.issuer.UserID() { + sts.Code, sts.Message = status.InternalServerError, "[test] wrong issuer" + } else { + resp.Body = &apisession.CreateResponse_Body{ + Id: x.id[:], + SessionKey: neofscrypto.PublicKeyBytes(x.sessionPubKey), + } + } + if x.modifyResp != nil { + x.modifyResp(&resp) + } + resp.VerifyHeader, err = neofscrypto.SignResponse(x.serverSigner, &resp, resp.Body, nil) + if err != nil { + return nil, fmt.Errorf("sign response: %w", err) + } + if x.corruptRespSig != nil { + x.corruptRespSig(&resp) } - return &resp, nil } -func TestClient_SessionCreate(t *testing.T) { +func TestClient_StartSession(t *testing.T) { ctx := 
context.Background() - signer := test.RandomSignerRFC6979(t) + var srv startSessionServer + srv.sleepDur = 10 * time.Millisecond + srv.serverSigner = neofscryptotest.RandomSigner() + srv.latestVersion = versiontest.Version() + srv.nodeInfo = netmaptest.NodeInfo() + srv.nodeInfo.SetPublicKey(neofscrypto.PublicKeyBytes(srv.serverSigner.Public())) + srv.issuer, _ = usertest.TwoUsers() + srv.exp = rand.Uint64() + srv.id = uuid.New() + sessionKey := neofsecdsa.Signer(neofscryptotest.ECDSAPrivateKey()) + srv.sessionPubKey = sessionKey.Public() + _dial := func(t testing.TB, srv *startSessionServer, assertErr func(error), customizeOpts func(*Options)) (*Client, *bool) { + var opts Options + var handlerCalled bool + opts.SetAPIRequestResultHandler(func(nodeKey []byte, endpoint string, op stat.Method, dur time.Duration, err error) { + handlerCalled = true + require.Equal(t, srv.nodeInfo.PublicKey(), nodeKey) + require.Equal(t, "localhost:8080", endpoint) + require.Equal(t, stat.MethodSessionCreate, op) + require.Greater(t, dur, srv.sleepDur) + assertErr(err) + }) + if customizeOpts != nil { + customizeOpts(&opts) + } - c := newClient(t, nil) + c, err := New(anyValidURI, opts) + require.NoError(t, err) - var prmSessionCreate PrmSessionCreate - prmSessionCreate.SetExp(1) + conn := bufconn.Listen(10 << 10) + gs := grpc.NewServer() + apinetmap.RegisterNetmapServiceServer(gs, srv) + apisession.RegisterSessionServiceServer(gs, srv) + go func() { _ = gs.Serve(conn) }() + t.Cleanup(gs.Stop) - t.Run("missing session id", func(t *testing.T) { - c.setNeoFSAPIServer(&sessionAPIServer{signer: signer, setBody: func(body *session.CreateResponseBody) { - body.SetSessionKey([]byte{1}) - }}) + c.dial = func(ctx context.Context, _ string) (net.Conn, error) { return conn.DialContext(ctx) } + require.NoError(t, c.Dial(ctx)) - result, err := c.SessionCreate(ctx, signer, prmSessionCreate) - require.Nil(t, result) - require.ErrorIs(t, err, ErrMissingResponseField) - require.Equal(t, "missing 
session id field in the response", err.Error()) + return c, &handlerCalled + } + dial := func(t testing.TB, srv *startSessionServer, assertErr func(error)) (*Client, *bool) { + return _dial(t, srv, assertErr, nil) + } + t.Run("invalid signer", func(t *testing.T) { + c, err := New(anyValidURI, Options{}) + require.NoError(t, err) + _, err = c.StartSession(ctx, nil, srv.exp, StartSessionOptions{}) + require.ErrorIs(t, err, errMissingSigner) }) + t.Run("OK", func(t *testing.T) { + srv := srv + assertErr := func(err error) { require.NoError(t, err) } + c, handlerCalled := dial(t, &srv, assertErr) + res, err := c.StartSession(ctx, srv.issuer, srv.exp, StartSessionOptions{}) + assertErr(err) + require.Equal(t, srv.id, res.ID) + require.Equal(t, srv.sessionPubKey, res.PublicKey) + require.True(t, *handlerCalled) + }) + t.Run("fail", func(t *testing.T) { + t.Run("sign request", func(t *testing.T) { + srv := srv + srv.sleepDur = 0 + assertErr := func(err error) { require.ErrorContains(t, err, errSignRequest) } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.StartSession(ctx, usertest.FailSigner(srv.issuer), srv.exp, StartSessionOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("transport", func(t *testing.T) { + srv := srv + srv.errTransport = errors.New("any transport failure") + assertErr := func(err error) { + require.ErrorContains(t, err, errTransport) + require.ErrorContains(t, err, "any transport failure") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.StartSession(ctx, srv.issuer, srv.exp, StartSessionOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("invalid response signature", func(t *testing.T) { + for i, testCase := range []struct { + err string + corrupt func(*apisession.CreateResponse) + }{ + {err: "missing verification header", + corrupt: func(r *apisession.CreateResponse) { r.VerifyHeader = nil }, + }, + {err: "missing body signature", + corrupt: func(r 
*apisession.CreateResponse) { r.VerifyHeader.BodySignature = nil }, + }, + {err: "missing signature of the meta header", + corrupt: func(r *apisession.CreateResponse) { r.VerifyHeader.MetaSignature = nil }, + }, + {err: "missing signature of the origin verification header", + corrupt: func(r *apisession.CreateResponse) { r.VerifyHeader.OriginSignature = nil }, + }, + {err: "verify body signature: missing public key", + corrupt: func(r *apisession.CreateResponse) { r.VerifyHeader.BodySignature.Key = nil }, + }, + {err: "verify signature of the meta header: missing public key", + corrupt: func(r *apisession.CreateResponse) { r.VerifyHeader.MetaSignature.Key = nil }, + }, + {err: "verify signature of the origin verification header: missing public key", + corrupt: func(r *apisession.CreateResponse) { r.VerifyHeader.OriginSignature.Key = nil }, + }, + {err: "verify body signature: decode public key from binary", + corrupt: func(r *apisession.CreateResponse) { + r.VerifyHeader.BodySignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the meta header: decode public key from binary", + corrupt: func(r *apisession.CreateResponse) { + r.VerifyHeader.MetaSignature.Key = []byte("not a public key") + }, + }, + {err: "verify signature of the origin verification header: decode public key from binary", + corrupt: func(r *apisession.CreateResponse) { + r.VerifyHeader.OriginSignature.Key = []byte("not a public key") + }, + }, + {err: "verify body signature: invalid scheme -1", + corrupt: func(r *apisession.CreateResponse) { r.VerifyHeader.BodySignature.Scheme = -1 }, + }, + {err: "verify body signature: unsupported scheme 3", + corrupt: func(r *apisession.CreateResponse) { r.VerifyHeader.BodySignature.Scheme = 3 }, + }, + {err: "verify signature of the meta header: unsupported scheme 3", + corrupt: func(r *apisession.CreateResponse) { r.VerifyHeader.MetaSignature.Scheme = 3 }, + }, + {err: "verify signature of the origin verification header: unsupported 
scheme 3", + corrupt: func(r *apisession.CreateResponse) { r.VerifyHeader.OriginSignature.Scheme = 3 }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apisession.CreateResponse) { r.VerifyHeader.BodySignature.Sign[0]++ }, + }, + {err: "verify signature of the meta header: signature mismatch", + corrupt: func(r *apisession.CreateResponse) { r.VerifyHeader.MetaSignature.Sign[0]++ }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apisession.CreateResponse) { r.VerifyHeader.OriginSignature.Sign[0]++ }, + }, + {err: "verify body signature: signature mismatch", + corrupt: func(r *apisession.CreateResponse) { + r.VerifyHeader.BodySignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the meta header: signature mismatch", + corrupt: func(r *apisession.CreateResponse) { + r.VerifyHeader.MetaSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + {err: "verify signature of the origin verification header: signature mismatch", + corrupt: func(r *apisession.CreateResponse) { + r.VerifyHeader.OriginSignature.Key = neofscrypto.PublicKeyBytes(neofscryptotest.RandomSigner().Public()) + }, + }, + } { + srv := srv + srv.corruptRespSig = testCase.corrupt + assertErr := func(err error) { + require.ErrorContains(t, err, errResponseSignature, [2]any{i, testCase}) + require.ErrorContains(t, err, testCase.err, [2]any{i, testCase}) + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.StartSession(ctx, srv.issuer, srv.exp, StartSessionOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + } + }) + t.Run("invalid response status", func(t *testing.T) { + srv := srv + srv.modifyResp = func(r *apisession.CreateResponse) { + r.MetaHeader.Status = &status.Status{Code: status.InternalServerError, Details: make([]*status.Status_Detail, 1)} + } + assertErr := func(err error) { + 
require.ErrorContains(t, err, errInvalidResponseStatus) + require.ErrorContains(t, err, "details attached but not supported") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.StartSession(ctx, srv.issuer, srv.exp, StartSessionOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("status errors", func(t *testing.T) { + for _, testCase := range []struct { + code uint32 + errConst error + errVar any + }{ + {code: 1 << 32 / 2}, + {code: status.InternalServerError, errConst: apistatus.ErrServerInternal, errVar: new(apistatus.InternalServerError)}, + {code: status.SignatureVerificationFail, errConst: apistatus.ErrSignatureVerification, errVar: new(apistatus.SignatureVerificationFailure)}, + } { + srv := srv + srv.modifyResp = func(r *apisession.CreateResponse) { + r.MetaHeader.Status = &status.Status{Code: testCase.code, Message: "any message"} + } + assertErr := func(err error) { + require.ErrorIs(t, err, apistatus.Error, testCase) + require.ErrorContains(t, err, "any message", testCase) + if testCase.errConst != nil { + require.ErrorIs(t, err, testCase.errConst, testCase) + } + if testCase.errVar != nil { + require.ErrorAs(t, err, testCase.errVar, testCase) + } + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.StartSession(ctx, srv.issuer, srv.exp, StartSessionOptions{}) + assertErr(err) + require.True(t, *handlerCalled, testCase) + } + }) + t.Run("response body", func(t *testing.T) { + t.Run("missing", func(t *testing.T) { + srv := srv + assertErr := func(err error) { require.EqualError(t, err, "invalid response: missing body") } + c, handlerCalled := dial(t, &srv, assertErr) + srv.modifyResp = func(r *apisession.CreateResponse) { r.Body = nil } + _, err := c.StartSession(ctx, srv.issuer, srv.exp, StartSessionOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + }) + t.Run("missing ID", func(t *testing.T) { + srv := srv + srv.modifyResp = func(r *apisession.CreateResponse) { r.Body.Id = nil } + assertErr := 
func(err error) { + require.EqualError(t, err, "invalid response: invalid body: missing required field (ID)") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.StartSession(ctx, srv.issuer, srv.exp, StartSessionOptions{}) + assertErr(err) + require.True(t, *handlerCalled) + + srv.modifyResp = func(r *apisession.CreateResponse) { r.Body.Id = []byte{} } + _, err = c.StartSession(ctx, srv.issuer, srv.exp, StartSessionOptions{}) + assertErr(err) + }) + t.Run("invalid ID", func(t *testing.T) { + srv := srv + srv.modifyResp = func(r *apisession.CreateResponse) { r.Body.Id = make([]byte, 17) } + assertErr := func(err error) { + require.ErrorContains(t, err, "invalid response: invalid body: invalid field (ID)") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.StartSession(ctx, srv.issuer, srv.exp, StartSessionOptions{}) + assertErr(err) + require.ErrorContains(t, err, "invalid UUID (got 17 bytes)") + require.True(t, *handlerCalled) - t.Run("missing session key", func(t *testing.T) { - c.setNeoFSAPIServer(&sessionAPIServer{signer: signer, setBody: func(body *session.CreateResponseBody) { - body.SetID([]byte{1}) - }}) + srv.modifyResp = func(r *apisession.CreateResponse) { r.Body.Id[6] = 1 << 4 } + _, err = c.StartSession(ctx, srv.issuer, srv.exp, StartSessionOptions{}) + assertErr(err) + require.ErrorContains(t, err, "wrong UUID version 1") + }) + t.Run("missing public session key", func(t *testing.T) { + srv := srv + srv.modifyResp = func(r *apisession.CreateResponse) { r.Body.SessionKey = nil } + assertErr := func(err error) { + require.EqualError(t, err, "invalid response: invalid body: missing required field (public session key)") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.StartSession(ctx, srv.issuer, srv.exp, StartSessionOptions{}) + assertErr(err) + require.True(t, *handlerCalled) - result, err := c.SessionCreate(ctx, signer, prmSessionCreate) - require.Nil(t, result) - require.ErrorIs(t, err, 
ErrMissingResponseField) - require.Equal(t, "missing session key field in the response", err.Error()) + srv.modifyResp = func(r *apisession.CreateResponse) { r.Body.SessionKey = []byte{} } + _, err = c.StartSession(ctx, srv.issuer, srv.exp, StartSessionOptions{}) + assertErr(err) + }) + t.Run("invalid public session key", func(t *testing.T) { + srv := srv + srv.modifyResp = func(r *apisession.CreateResponse) { r.Body.SessionKey = r.Body.SessionKey[:32] } + assertErr := func(err error) { + require.ErrorContains(t, err, "invalid response: invalid body: invalid field (public session key)") + } + c, handlerCalled := dial(t, &srv, assertErr) + _, err := c.StartSession(ctx, srv.issuer, srv.exp, StartSessionOptions{}) + assertErr(err) + require.ErrorContains(t, err, "unexpected EOF") + require.True(t, *handlerCalled) + + srv.modifyResp = func(r *apisession.CreateResponse) { r.Body.SessionKey[0] = 255 } + _, err = c.StartSession(ctx, srv.issuer, srv.exp, StartSessionOptions{}) + assertErr(err) + require.ErrorContains(t, err, "invalid prefix 255") + }) + }) + }) + t.Run("response info handler", func(t *testing.T) { + t.Run("OK", func(t *testing.T) { + srv := srv + srv.epoch = 3598503 + assertErr := func(err error) { require.NoError(t, err) } + respHandlerCalled := false + c, reqHandlerCalled := _dial(t, &srv, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + respHandlerCalled = true + require.EqualValues(t, 3598503, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return nil + }) + }) + _, err := c.StartSession(ctx, srv.issuer, srv.exp, StartSessionOptions{}) + assertErr(err) + require.True(t, respHandlerCalled) + require.True(t, *reqHandlerCalled) + }) + t.Run("fail", func(t *testing.T) { + srv := srv + srv.epoch = 4386380643 + assertErr := func(err error) { require.ErrorContains(t, err, "intercept response info: some handler error") } + respHandlerCalled 
:= false + c, reqHandlerCalled := _dial(t, &srv, assertErr, func(opts *Options) { + opts.SetAPIResponseInfoInterceptor(func(info ResponseMetaInfo) error { + if !respHandlerCalled { // dial + respHandlerCalled = true + return nil + } + require.EqualValues(t, 4386380643, info.Epoch()) + require.Equal(t, neofscrypto.PublicKeyBytes(srv.serverSigner.Public()), info.ResponderKey()) + return errors.New("some handler error") + }) + }) + _, err := c.StartSession(ctx, srv.issuer, srv.exp, StartSessionOptions{}) + assertErr(err) + require.True(t, respHandlerCalled) + require.True(t, *reqHandlerCalled) + }) }) } diff --git a/client/sign.go b/client/sign.go deleted file mode 100644 index 55bbf0a2b..000000000 --- a/client/sign.go +++ /dev/null @@ -1,390 +0,0 @@ -package client - -import ( - "errors" - "fmt" - - "github.com/nspcc-dev/neofs-api-go/v2/accounting" - "github.com/nspcc-dev/neofs-api-go/v2/container" - "github.com/nspcc-dev/neofs-api-go/v2/netmap" - "github.com/nspcc-dev/neofs-api-go/v2/object" - "github.com/nspcc-dev/neofs-api-go/v2/refs" - "github.com/nspcc-dev/neofs-api-go/v2/reputation" - "github.com/nspcc-dev/neofs-api-go/v2/session" - "github.com/nspcc-dev/neofs-api-go/v2/util/signature" - neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" -) - -type serviceRequest interface { - GetMetaHeader() *session.RequestMetaHeader - GetVerificationHeader() *session.RequestVerificationHeader - SetVerificationHeader(*session.RequestVerificationHeader) -} - -type serviceResponse interface { - GetMetaHeader() *session.ResponseMetaHeader - GetVerificationHeader() *session.ResponseVerificationHeader - SetVerificationHeader(*session.ResponseVerificationHeader) -} - -type stableMarshaler interface { - StableMarshal([]byte) []byte - StableSize() int -} - -type stableMarshalerWrapper struct { - SM stableMarshaler -} - -type metaHeader interface { - stableMarshaler - getOrigin() metaHeader -} - -type verificationHeader interface { - stableMarshaler - - GetBodySignature() 
*refs.Signature - SetBodySignature(*refs.Signature) - GetMetaSignature() *refs.Signature - SetMetaSignature(*refs.Signature) - GetOriginSignature() *refs.Signature - SetOriginSignature(*refs.Signature) - - setOrigin(stableMarshaler) - getOrigin() verificationHeader -} - -type requestMetaHeader struct { - *session.RequestMetaHeader -} - -type responseMetaHeader struct { - *session.ResponseMetaHeader -} - -type requestVerificationHeader struct { - *session.RequestVerificationHeader -} - -type responseVerificationHeader struct { - *session.ResponseVerificationHeader -} - -func (h *requestMetaHeader) getOrigin() metaHeader { - return &requestMetaHeader{ - RequestMetaHeader: h.GetOrigin(), - } -} - -func (h *responseMetaHeader) getOrigin() metaHeader { - return &responseMetaHeader{ - ResponseMetaHeader: h.GetOrigin(), - } -} - -func (h *requestVerificationHeader) getOrigin() verificationHeader { - if origin := h.GetOrigin(); origin != nil { - return &requestVerificationHeader{ - RequestVerificationHeader: origin, - } - } - - return nil -} - -func (h *requestVerificationHeader) setOrigin(m stableMarshaler) { - if m != nil { - h.SetOrigin(m.(*session.RequestVerificationHeader)) - } -} - -func (r *responseVerificationHeader) getOrigin() verificationHeader { - if origin := r.GetOrigin(); origin != nil { - return &responseVerificationHeader{ - ResponseVerificationHeader: origin, - } - } - - return nil -} - -func (r *responseVerificationHeader) setOrigin(m stableMarshaler) { - if m != nil { - r.SetOrigin(m.(*session.ResponseVerificationHeader)) - } -} - -func (s stableMarshalerWrapper) ReadSignedData(buf []byte) ([]byte, error) { - if s.SM != nil { - return s.SM.StableMarshal(buf), nil - } - - return nil, nil -} - -func (s stableMarshalerWrapper) SignedDataSize() int { - if s.SM != nil { - return s.SM.StableSize() - } - - return 0 -} - -// signServiceMessage signing request or response messages which can be sent or received from neofs endpoint. 
-// Return errors: -// - [ErrSign] -func signServiceMessage(signer neofscrypto.Signer, msg any, buf []byte) error { - var ( - body, meta, verifyOrigin stableMarshaler - verifyHdr verificationHeader - verifyHdrSetter func(verificationHeader) - ) - - switch v := msg.(type) { - case nil: - return nil - case serviceRequest: - body = serviceMessageBody(v) - meta = v.GetMetaHeader() - verifyHdr = &requestVerificationHeader{new(session.RequestVerificationHeader)} - verifyHdrSetter = func(h verificationHeader) { - v.SetVerificationHeader(h.(*requestVerificationHeader).RequestVerificationHeader) - } - - if h := v.GetVerificationHeader(); h != nil { - verifyOrigin = h - } - case serviceResponse: - body = serviceMessageBody(v) - meta = v.GetMetaHeader() - verifyHdr = &responseVerificationHeader{new(session.ResponseVerificationHeader)} - verifyHdrSetter = func(h verificationHeader) { - v.SetVerificationHeader(h.(*responseVerificationHeader).ResponseVerificationHeader) - } - - if h := v.GetVerificationHeader(); h != nil { - verifyOrigin = h - } - default: - return NewSignError(fmt.Errorf("unsupported session message %T", v)) - } - - if verifyOrigin == nil { - // sign session message body - if err := signServiceMessagePart(signer, body, verifyHdr.SetBodySignature, buf); err != nil { - return NewSignError(fmt.Errorf("body: %w", err)) - } - } - - // sign meta header - if err := signServiceMessagePart(signer, meta, verifyHdr.SetMetaSignature, buf); err != nil { - return NewSignError(fmt.Errorf("meta header: %w", err)) - } - - // sign verification header origin - if err := signServiceMessagePart(signer, verifyOrigin, verifyHdr.SetOriginSignature, buf); err != nil { - return NewSignError(fmt.Errorf("origin of verification header: %w", err)) - } - - // wrap origin verification header - verifyHdr.setOrigin(verifyOrigin) - - // update matryoshka verification header - verifyHdrSetter(verifyHdr) - - return nil -} - -func signServiceMessagePart(signer neofscrypto.Signer, part 
stableMarshaler, sigWrite func(*refs.Signature), buf []byte) error { - var sig neofscrypto.Signature - var sigv2 refs.Signature - - if err := sig.CalculateMarshalled(signer, part, buf); err != nil { - return fmt.Errorf("calculate %w", err) - } - - sig.WriteToV2(&sigv2) - sigWrite(&sigv2) - - return nil -} - -func verifyServiceMessage(msg any) error { - var ( - meta metaHeader - verify verificationHeader - ) - - switch v := msg.(type) { - case nil: - return nil - case serviceRequest: - meta = &requestMetaHeader{ - RequestMetaHeader: v.GetMetaHeader(), - } - - verify = &requestVerificationHeader{ - RequestVerificationHeader: v.GetVerificationHeader(), - } - case serviceResponse: - meta = &responseMetaHeader{ - ResponseMetaHeader: v.GetMetaHeader(), - } - - verify = &responseVerificationHeader{ - ResponseVerificationHeader: v.GetVerificationHeader(), - } - default: - return fmt.Errorf("unsupported session message %T", v) - } - - body := serviceMessageBody(msg) - size := body.StableSize() - if sz := meta.StableSize(); sz > size { - size = sz - } - if sz := verify.StableSize(); sz > size { - size = sz - } - - buf := make([]byte, 0, size) - return verifyMatryoshkaLevel(body, meta, verify, buf) -} - -func verifyMatryoshkaLevel(body stableMarshaler, meta metaHeader, verify verificationHeader, buf []byte) error { - if err := verifyServiceMessagePart(meta, verify.GetMetaSignature, buf); err != nil { - return fmt.Errorf("could not verify meta header: %w", err) - } - - origin := verify.getOrigin() - - if err := verifyServiceMessagePart(origin, verify.GetOriginSignature, buf); err != nil { - return fmt.Errorf("could not verify origin of verification header: %w", err) - } - - if origin == nil { - if err := verifyServiceMessagePart(body, verify.GetBodySignature, buf); err != nil { - return fmt.Errorf("could not verify body: %w", err) - } - - return nil - } - - if verify.GetBodySignature() != nil { - return errors.New("body signature at the matryoshka upper level") - } - - return 
verifyMatryoshkaLevel(body, meta.getOrigin(), origin, buf) -} - -func verifyServiceMessagePart(part stableMarshaler, sigRdr func() *refs.Signature, buf []byte) error { - return signature.VerifyDataWithSource( - &stableMarshalerWrapper{part}, - sigRdr, - signature.WithBuffer(buf), - ) -} - -func serviceMessageBody(req any) stableMarshaler { - switch v := req.(type) { - default: - panic(fmt.Sprintf("unsupported session message %T", req)) - - /* Accounting */ - case *accounting.BalanceRequest: - return v.GetBody() - case *accounting.BalanceResponse: - return v.GetBody() - - /* Session */ - case *session.CreateRequest: - return v.GetBody() - case *session.CreateResponse: - return v.GetBody() - - /* Container */ - case *container.PutRequest: - return v.GetBody() - case *container.PutResponse: - return v.GetBody() - case *container.DeleteRequest: - return v.GetBody() - case *container.DeleteResponse: - return v.GetBody() - case *container.GetRequest: - return v.GetBody() - case *container.GetResponse: - return v.GetBody() - case *container.ListRequest: - return v.GetBody() - case *container.ListResponse: - return v.GetBody() - case *container.SetExtendedACLRequest: - return v.GetBody() - case *container.SetExtendedACLResponse: - return v.GetBody() - case *container.GetExtendedACLRequest: - return v.GetBody() - case *container.GetExtendedACLResponse: - return v.GetBody() - case *container.AnnounceUsedSpaceRequest: - return v.GetBody() - case *container.AnnounceUsedSpaceResponse: - return v.GetBody() - - /* Object */ - case *object.PutRequest: - return v.GetBody() - case *object.PutResponse: - return v.GetBody() - case *object.GetRequest: - return v.GetBody() - case *object.GetResponse: - return v.GetBody() - case *object.HeadRequest: - return v.GetBody() - case *object.HeadResponse: - return v.GetBody() - case *object.SearchRequest: - return v.GetBody() - case *object.SearchResponse: - return v.GetBody() - case *object.DeleteRequest: - return v.GetBody() - case 
*object.DeleteResponse: - return v.GetBody() - case *object.GetRangeRequest: - return v.GetBody() - case *object.GetRangeResponse: - return v.GetBody() - case *object.GetRangeHashRequest: - return v.GetBody() - case *object.GetRangeHashResponse: - return v.GetBody() - - /* Netmap */ - case *netmap.LocalNodeInfoRequest: - return v.GetBody() - case *netmap.LocalNodeInfoResponse: - return v.GetBody() - case *netmap.NetworkInfoRequest: - return v.GetBody() - case *netmap.NetworkInfoResponse: - return v.GetBody() - case *netmap.SnapshotRequest: - return v.GetBody() - case *netmap.SnapshotResponse: - return v.GetBody() - - /* Reputation */ - case *reputation.AnnounceLocalTrustRequest: - return v.GetBody() - case *reputation.AnnounceLocalTrustResponse: - return v.GetBody() - case *reputation.AnnounceIntermediateResultRequest: - return v.GetBody() - case *reputation.AnnounceIntermediateResultResponse: - return v.GetBody() - } -} diff --git a/client/sign_test.go b/client/sign_test.go deleted file mode 100644 index 46687a92e..000000000 --- a/client/sign_test.go +++ /dev/null @@ -1,242 +0,0 @@ -package client - -import ( - "crypto/rand" - "testing" - - "github.com/nspcc-dev/neofs-api-go/v2/accounting" - "github.com/nspcc-dev/neofs-api-go/v2/refs" - "github.com/nspcc-dev/neofs-api-go/v2/session" - neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" - "github.com/nspcc-dev/neofs-sdk-go/crypto/test" - "github.com/stretchr/testify/require" -) - -type testResponse interface { - SetMetaHeader(*session.ResponseMetaHeader) - GetMetaHeader() *session.ResponseMetaHeader -} - -func testOwner(t *testing.T, owner *refs.OwnerID, req any) { - originalValue := owner.GetValue() - owner.SetValue([]byte{1, 2, 3}) - // verification must fail - require.Error(t, verifyServiceMessage(req)) - owner.SetValue(originalValue) - require.NoError(t, verifyServiceMessage(req)) -} - -func testRequestSign(t *testing.T, signer neofscrypto.Signer, meta *session.RequestMetaHeader, req request) { - 
require.Error(t, verifyServiceMessage(req)) - - // sign request - require.NoError(t, signServiceMessage(signer, req, nil)) - - // verification must pass - require.NoError(t, verifyServiceMessage(req)) - - meta.SetOrigin(req.GetMetaHeader()) - req.SetMetaHeader(meta) - - // sign request - require.NoError(t, signServiceMessage(signer, req, nil)) - - // verification must pass - require.NoError(t, verifyServiceMessage(req)) -} - -func testRequestMeta(t *testing.T, meta *session.RequestMetaHeader, req serviceRequest) { - // corrupt meta header - meta.SetTTL(meta.GetTTL() + 1) - - // verification must fail - require.Error(t, verifyServiceMessage(req)) - - // restore meta header - meta.SetTTL(meta.GetTTL() - 1) - - // corrupt origin verification header - req.GetVerificationHeader().SetOrigin(nil) - - // verification must fail - require.Error(t, verifyServiceMessage(req)) -} - -func testResponseSign(t *testing.T, signer neofscrypto.Signer, meta *session.ResponseMetaHeader, resp testResponse) { - require.Error(t, verifyServiceMessage(resp)) - - // sign request - require.NoError(t, signServiceMessage(signer, resp, nil)) - - // verification must pass - require.NoError(t, verifyServiceMessage(resp)) - - meta.SetOrigin(resp.GetMetaHeader()) - resp.SetMetaHeader(meta) - - // sign request - require.NoError(t, signServiceMessage(signer, resp, nil)) - - // verification must pass - require.NoError(t, verifyServiceMessage(resp)) -} - -func testResponseMeta(t *testing.T, meta *session.ResponseMetaHeader, req serviceResponse) { - // corrupt meta header - meta.SetTTL(meta.GetTTL() + 1) - - // verification must fail - require.Error(t, verifyServiceMessage(req)) - - // restore meta header - meta.SetTTL(meta.GetTTL() - 1) - - // corrupt origin verification header - req.GetVerificationHeader().SetOrigin(nil) - - // verification must fail - require.Error(t, verifyServiceMessage(req)) -} - -func TestEmptyMessage(t *testing.T) { - signer := test.RandomSignerRFC6979(t) - - require.NoError(t, 
verifyServiceMessage(nil)) - require.NoError(t, signServiceMessage(signer, nil, nil)) -} - -func TestBalanceRequest(t *testing.T) { - signer := test.RandomSignerRFC6979(t) - id := signer.UserID() - - var ownerID refs.OwnerID - id.WriteToV2(&ownerID) - - body := accounting.BalanceRequestBody{} - body.SetOwnerID(&ownerID) - - meta := &session.RequestMetaHeader{} - meta.SetTTL(1) - - req := &accounting.BalanceRequest{} - req.SetBody(&body) - req.SetMetaHeader(meta) - - // add level to meta header matryoshka - meta = &session.RequestMetaHeader{} - testRequestSign(t, signer, meta, req) - - testOwner(t, &ownerID, req) - testRequestMeta(t, meta, req) -} - -func TestBalanceResponse(t *testing.T) { - signer := test.RandomSignerRFC6979(t) - - dec := new(accounting.Decimal) - dec.SetValue(100) - - body := new(accounting.BalanceResponseBody) - body.SetBalance(dec) - - meta := new(session.ResponseMetaHeader) - meta.SetTTL(1) - - resp := new(accounting.BalanceResponse) - resp.SetBody(body) - resp.SetMetaHeader(meta) - - // add level to meta header matryoshka - meta = new(session.ResponseMetaHeader) - testResponseSign(t, signer, meta, resp) - - // corrupt body - dec.SetValue(dec.GetValue() + 1) - - // verification must fail - require.Error(t, verifyServiceMessage(resp)) - - // restore body - dec.SetValue(dec.GetValue() - 1) - - testResponseMeta(t, meta, resp) -} - -func TestCreateRequest(t *testing.T) { - signer := test.RandomSignerRFC6979(t) - id := signer.UserID() - - var ownerID refs.OwnerID - id.WriteToV2(&ownerID) - - body := session.CreateRequestBody{} - body.SetOwnerID(&ownerID) - body.SetExpiration(100) - - meta := &session.RequestMetaHeader{} - meta.SetTTL(1) - - req := &session.CreateRequest{} - req.SetBody(&body) - req.SetMetaHeader(meta) - - // add level to meta header matryoshka - meta = &session.RequestMetaHeader{} - testRequestSign(t, signer, meta, req) - - testOwner(t, &ownerID, req) - - // corrupt body - body.SetExpiration(body.GetExpiration() + 1) - - // 
verification must fail - require.Error(t, verifyServiceMessage(req)) - - // restore body - body.SetExpiration(body.GetExpiration() - 1) - - testRequestMeta(t, meta, req) -} - -func TestCreateResponse(t *testing.T) { - signer := test.RandomSignerRFC6979(t) - - id := make([]byte, 8) - _, err := rand.Read(id) - require.NoError(t, err) - - sessionKey := make([]byte, 8) - _, err = rand.Read(sessionKey) - require.NoError(t, err) - - body := session.CreateResponseBody{} - body.SetID(id) - body.SetSessionKey(sessionKey) - - meta := &session.ResponseMetaHeader{} - meta.SetTTL(1) - - req := &session.CreateResponse{} - req.SetBody(&body) - req.SetMetaHeader(meta) - - // add level to meta header matryoshka - meta = &session.ResponseMetaHeader{} - testResponseSign(t, signer, meta, req) - - // corrupt body - body.SetID([]byte{1}) - // verification must fail - require.Error(t, verifyServiceMessage(req)) - // restore body - body.SetID(id) - - // corrupt body - body.SetSessionKey([]byte{1}) - // verification must fail - require.Error(t, verifyServiceMessage(req)) - // restore body - body.SetSessionKey(id) - - testResponseMeta(t, meta, req) -} diff --git a/client/status/common.go b/client/status/common.go index e3e5b0463..9568cf8b6 100644 --- a/client/status/common.go +++ b/client/status/common.go @@ -3,298 +3,214 @@ package apistatus import ( "encoding/binary" "errors" + "fmt" - "github.com/nspcc-dev/neofs-api-go/v2/status" + "github.com/nspcc-dev/neofs-sdk-go/api/status" ) // Error describes common error which is a grouping type for any [apistatus] errors. Any [apistatus] error may be checked // explicitly via it's type of just check the group via errors.Is(err, [apistatus.Error]). var Error = errors.New("api error") +// Common error instances which may be used to check API errors against using +// [errors.Is]. All of them MUST NOT be changed. var ( - // ErrServerInternal is an instance of ServerInternal error status. 
It's expected to be used for [errors.Is] - // and MUST NOT be changed. - ErrServerInternal ServerInternal - // ErrWrongMagicNumber is an instance of WrongMagicNumber error status. It's expected to be used for [errors.Is] - // and MUST NOT be changed. - ErrWrongMagicNumber WrongMagicNumber - // ErrSignatureVerification is an instance of SignatureVerification error status. It's expected to be used for [errors.Is] - // and MUST NOT be changed. - ErrSignatureVerification SignatureVerification - // ErrNodeUnderMaintenance is an instance of NodeUnderMaintenance error status. It's expected to be used for [errors.Is] - // and MUST NOT be changed. - ErrNodeUnderMaintenance NodeUnderMaintenance + ErrServerInternal InternalServerError + ErrWrongNetMagic WrongNetMagic + ErrSignatureVerification SignatureVerificationFailure + ErrNodeUnderMaintenance NodeUnderMaintenance ) -// ServerInternal describes failure statuses related to internal server errors. -// Instances provide [StatusV2] and error interfaces. +// InternalServerError describes failure statuses related to internal server +// errors. // // The status is purely informative, the client should not go into details of the error except for debugging needs. -type ServerInternal struct { - v2 status.Status -} +type InternalServerError string -func (x ServerInternal) Error() string { - return errMessageStatusV2( - globalizeCodeV2(status.Internal, status.GlobalizeCommonFail), - x.v2.Message(), - ) +// NewInternalServerError constructs internal server error with specified cause. +func NewInternalServerError(cause error) InternalServerError { + return InternalServerError(cause.Error()) } -// Is implements interface for correct checking current error type with [errors.Is]. -func (x ServerInternal) Is(target error) bool { - switch target.(type) { - default: - return errors.Is(Error, target) - case ServerInternal, *ServerInternal: - return true +// Error implements built-in error interface. 
+func (x InternalServerError) Error() string { + const desc = "internal server error" + if x != "" { + return fmt.Sprintf(errFmt, status.InternalServerError, desc, string(x)) } + return fmt.Sprintf(errFmtNoMessage, status.InternalServerError, desc) } -// implements local interface defined in [ErrorFromV2] func. -func (x *ServerInternal) fromStatusV2(st *status.Status) { - x.v2 = *st -} +// Is checks whether target is of type InternalServerError, *InternalServerError +// or [Error]. Is implements interface consumed by [errors.Is]. +func (x InternalServerError) Is(target error) bool { return errorIs(x, target) } -// ErrorToV2 implements [StatusV2] interface method. -// If the value was returned by [ErrorFromV2], returns the source message. -// Otherwise, returns message with -// - code: INTERNAL; -// - string message: empty; -// - details: empty. -func (x ServerInternal) ErrorToV2() *status.Status { - x.v2.SetCode(globalizeCodeV2(status.Internal, status.GlobalizeCommonFail)) - return &x.v2 +func (x *InternalServerError) readFromV2(m *status.Status) error { + if m.Code != status.InternalServerError { + panic(fmt.Sprintf("unexpected code %d instead of %d", m.Code, status.InternalServerError)) + } + if len(m.Details) > 0 { + return errors.New("details attached but not supported") + } + *x = InternalServerError(m.Message) + return nil } -// SetMessage sets message describing internal error. -// -// Message should be used for debug purposes only. -func (x *ServerInternal) SetMessage(msg string) { - x.v2.SetMessage(msg) +// ErrorToV2 implements [StatusV2] interface method. +func (x InternalServerError) ErrorToV2() *status.Status { + return &status.Status{Code: status.InternalServerError, Message: string(x)} } -// Message returns message describing internal server error. -// -// Message should be used for debug purposes only. By default, it is empty. 
-func (x ServerInternal) Message() string { - return x.v2.Message() +// WrongNetMagic describes failure status related to incorrect network magic. +type WrongNetMagic struct { + correctSet bool + correct uint64 + msg string } -// WriteInternalServerErr writes err message to ServerInternal instance. -func WriteInternalServerErr(x *ServerInternal, err error) { - x.SetMessage(err.Error()) +// NewWrongNetMagicError constructs wrong network magic error indicating the +// correct value. +func NewWrongNetMagicError(correct uint64) WrongNetMagic { + return WrongNetMagic{ + correctSet: true, + correct: correct, + } } -// WrongMagicNumber describes failure status related to incorrect network magic. -// Instances provide [StatusV2] and error interfaces. -type WrongMagicNumber struct { - v2 status.Status +// Error implements built-in error interface. +func (x WrongNetMagic) Error() string { + const desc = "wrong network magic" + if x.msg != "" { + if x.correctSet { + return fmt.Sprintf(errFmt, status.WrongNetMagic, fmt.Sprintf("%s, expected %d", desc, x.correct), x.msg) + } + return fmt.Sprintf(errFmt, status.WrongNetMagic, desc, x.msg) + } + if x.correctSet { + return fmt.Sprintf(errFmtNoMessage, status.WrongNetMagic, fmt.Sprintf("%s, expected %d", desc, x.correct)) + } + return fmt.Sprintf(errFmtNoMessage, status.WrongNetMagic, desc) } -func (x WrongMagicNumber) Error() string { - return errMessageStatusV2( - globalizeCodeV2(status.WrongMagicNumber, status.GlobalizeCommonFail), - x.v2.Message(), - ) -} +// Is checks whether target is of type WrongNetMagic, *WrongNetMagic or [Error]. +// Is implements interface consumed by [errors.Is]. +func (x WrongNetMagic) Is(target error) bool { return errorIs(x, target) } -// Is implements interface for correct checking current error type with [errors.Is]. 
-func (x WrongMagicNumber) Is(target error) bool { - switch target.(type) { - default: - return errors.Is(Error, target) - case WrongMagicNumber, *WrongMagicNumber: - return true +func (x *WrongNetMagic) readFromV2(m *status.Status) error { + if m.Code != status.WrongNetMagic { + panic(fmt.Sprintf("unexpected code %d instead of %d", m.Code, status.WrongNetMagic)) } -} - -// implements local interface defined in [ErrorFromV2] func. -func (x *WrongMagicNumber) fromStatusV2(st *status.Status) { - x.v2 = *st + if len(m.Details) > 0 { + if len(m.Details) > 1 { + return fmt.Errorf("too many details (%d)", len(m.Details)) + } + if m.Details[0].Id != status.DetailCorrectNetMagic { + return fmt.Errorf("unsupported detail ID=%d", m.Details[0].Id) + } + if len(m.Details[0].Value) != 8 { + return fmt.Errorf("invalid correct value detail: invalid length %d", len(m.Details[0].Value)) + } + x.correct = binary.BigEndian.Uint64(m.Details[0].Value) + x.correctSet = true + } else { + x.correctSet = false + } + x.msg = m.Message + return nil } // ErrorToV2 implements [StatusV2] interface method. -// If the value was returned by [ErrorFromV2], returns the source message. -// Otherwise, returns message with -// - code: WRONG_MAGIC_NUMBER; -// - string message: empty; -// - details: empty. -func (x WrongMagicNumber) ErrorToV2() *status.Status { - x.v2.SetCode(globalizeCodeV2(status.WrongMagicNumber, status.GlobalizeCommonFail)) - return &x.v2 +func (x WrongNetMagic) ErrorToV2() *status.Status { + st := status.Status{Code: status.WrongNetMagic, Message: x.msg} + if x.correctSet { + st.Details = []*status.Status_Detail{{ + Id: status.DetailCorrectNetMagic, + Value: make([]byte, 8), + }} + binary.BigEndian.PutUint64(st.Details[0].Value, x.correct) + } + return &st } -// WriteCorrectMagic writes correct network magic. 
-func (x *WrongMagicNumber) WriteCorrectMagic(magic uint64) { - // serialize the number - buf := make([]byte, 8) - - binary.BigEndian.PutUint64(buf, magic) - - // create corresponding detail - var d status.Detail - - d.SetID(status.DetailIDCorrectMagic) - d.SetValue(buf) - - // attach the detail - x.v2.AppendDetails(d) +// CorrectMagic returns network magic returned by the server. Returns zero if +// the value was not attached. +func (x WrongNetMagic) CorrectMagic() uint64 { + if x.correctSet { + return x.correct + } + return 0 } -// CorrectMagic returns network magic returned by the server. -// Second value indicates presence status: -// - -1 if number is presented in incorrect format -// - 0 if number is not presented -// - +1 otherwise -func (x WrongMagicNumber) CorrectMagic() (magic uint64, ok int8) { - x.v2.IterateDetails(func(d *status.Detail) bool { - if d.ID() == status.DetailIDCorrectMagic { - if val := d.Value(); len(val) == 8 { - magic = binary.BigEndian.Uint64(val) - ok = 1 - } else { - ok = -1 - } - } +// SignatureVerificationFailure describes failure status related to signature +// verification failures. +type SignatureVerificationFailure string - return ok != 0 - }) - - return +// NewSignatureVerificationFailure constructs signature verification error with +// specified cause. +func NewSignatureVerificationFailure(cause error) SignatureVerificationFailure { + return SignatureVerificationFailure(cause.Error()) } -// SignatureVerification describes failure status related to signature verification. -// Instances provide [StatusV2] and error interfaces. -type SignatureVerification struct { - v2 status.Status +// Error implements built-in error interface. 
+func (x SignatureVerificationFailure) Error() string { + const desc = "signature verification failed" + if x != "" { + return fmt.Sprintf(errFmt, status.SignatureVerificationFail, desc, string(x)) + } + return fmt.Sprintf(errFmtNoMessage, status.SignatureVerificationFail, desc) } -const defaultSignatureVerificationMsg = "signature verification failed" +// Is checks whether target is of type SignatureVerificationFailure, +// *SignatureVerificationFailure or [Error]. Is implements interface consumed by +// [errors.Is]. +func (x SignatureVerificationFailure) Is(target error) bool { return errorIs(x, target) } -func (x SignatureVerification) Error() string { - msg := x.v2.Message() - if msg == "" { - msg = defaultSignatureVerificationMsg +func (x *SignatureVerificationFailure) readFromV2(m *status.Status) error { + if m.Code != status.SignatureVerificationFail { + panic(fmt.Sprintf("unexpected code %d instead of %d", m.Code, status.SignatureVerificationFail)) } - - return errMessageStatusV2( - globalizeCodeV2(status.SignatureVerificationFail, status.GlobalizeCommonFail), - msg, - ) -} - -// Is implements interface for correct checking current error type with [errors.Is]. -func (x SignatureVerification) Is(target error) bool { - switch target.(type) { - default: - return errors.Is(Error, target) - case SignatureVerification, *SignatureVerification: - return true + if len(m.Details) > 0 { + return errors.New("details attached but not supported") } -} - -// implements local interface defined in [ErrorFromV2] func. -func (x *SignatureVerification) fromStatusV2(st *status.Status) { - x.v2 = *st + *x = SignatureVerificationFailure(m.Message) + return nil } // ErrorToV2 implements [StatusV2] interface method. -// If the value was returned by [ErrorFromV2], returns the source message. 
-// Otherwise, returns message with -// - code: SIGNATURE_VERIFICATION_FAIL; -// - string message: written message via [SignatureVerification.SetMessage] or -// "signature verification failed" as a default message; -// - details: empty. -func (x SignatureVerification) ErrorToV2() *status.Status { - x.v2.SetCode(globalizeCodeV2(status.SignatureVerificationFail, status.GlobalizeCommonFail)) - - if x.v2.Message() == "" { - x.v2.SetMessage(defaultSignatureVerificationMsg) - } - - return &x.v2 -} - -// SetMessage writes signature verification failure message. -// Message should be used for debug purposes only. -// -// See also Message. -func (x *SignatureVerification) SetMessage(v string) { - x.v2.SetMessage(v) -} - -// Message returns status message. Zero status returns empty message. -// Message should be used for debug purposes only. -// -// See also SetMessage. -func (x SignatureVerification) Message() string { - return x.v2.Message() -} - -// NodeUnderMaintenance describes failure status for nodes being under maintenance. -// Instances provide [StatusV2] and error interfaces. -type NodeUnderMaintenance struct { - v2 status.Status +func (x SignatureVerificationFailure) ErrorToV2() *status.Status { + return &status.Status{Code: status.SignatureVerificationFail, Message: string(x)} } -const defaultNodeUnderMaintenanceMsg = "node is under maintenance" +// NodeUnderMaintenance describes failure status for nodes being under +// maintenance. +type NodeUnderMaintenance struct{ msg string } -// Error implements the error interface. +// Error implements built-in error interface. 
func (x NodeUnderMaintenance) Error() string { - msg := x.Message() - if msg == "" { - msg = defaultNodeUnderMaintenanceMsg + const desc = "node is under maintenance" + if x.msg != "" { + return fmt.Sprintf(errFmt, status.NodeUnderMaintenance, desc, x.msg) } - - return errMessageStatusV2( - globalizeCodeV2(status.NodeUnderMaintenance, status.GlobalizeCommonFail), - msg, - ) + return fmt.Sprintf(errFmtNoMessage, status.NodeUnderMaintenance, desc) } -// Is implements interface for correct checking current error type with [errors.Is]. -func (x NodeUnderMaintenance) Is(target error) bool { - switch target.(type) { - default: - return errors.Is(Error, target) - case NodeUnderMaintenance, *NodeUnderMaintenance: - return true - } -} +// Is checks whether target is of type NodeUnderMaintenance, +// *NodeUnderMaintenance or [Error]. Is implements interface consumed by +// [errors.Is]. +func (x NodeUnderMaintenance) Is(target error) bool { return errorIs(x, target) } -func (x *NodeUnderMaintenance) fromStatusV2(st *status.Status) { - x.v2 = *st +func (x *NodeUnderMaintenance) readFromV2(m *status.Status) error { + if m.Code != status.NodeUnderMaintenance { + panic(fmt.Sprintf("unexpected code %d instead of %d", m.Code, status.NodeUnderMaintenance)) + } + if len(m.Details) > 0 { + return errors.New("details attached but not supported") + } + x.msg = m.Message + return nil } // ErrorToV2 implements [StatusV2] interface method. -// If the value was returned by [ErrorFromV2], returns the source message. -// Otherwise, returns message with -// - code: NODE_UNDER_MAINTENANCE; -// - string message: written message via [NodeUnderMaintenance.SetMessage] or -// "node is under maintenance" as a default message; -// - details: empty. 
func (x NodeUnderMaintenance) ErrorToV2() *status.Status { - x.v2.SetCode(globalizeCodeV2(status.NodeUnderMaintenance, status.GlobalizeCommonFail)) - if x.v2.Message() == "" { - x.v2.SetMessage(defaultNodeUnderMaintenanceMsg) - } - - return &x.v2 -} - -// SetMessage writes signature verification failure message. -// Message should be used for debug purposes only. -// -// See also Message. -func (x *NodeUnderMaintenance) SetMessage(v string) { - x.v2.SetMessage(v) -} - -// Message returns status message. Zero status returns empty message. -// Message should be used for debug purposes only. -// -// See also SetMessage. -func (x NodeUnderMaintenance) Message() string { - return x.v2.Message() + return &status.Status{Code: status.NodeUnderMaintenance, Message: x.msg} } diff --git a/client/status/common_test.go b/client/status/common_test.go index 5576f480f..18a643f65 100644 --- a/client/status/common_test.go +++ b/client/status/common_test.go @@ -1,130 +1,186 @@ package apistatus_test import ( + "errors" "testing" - "github.com/nspcc-dev/neofs-api-go/v2/status" + "github.com/nspcc-dev/neofs-sdk-go/api/status" apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" "github.com/stretchr/testify/require" ) -func TestServerInternal_Message(t *testing.T) { - const msg = "some message" - - var st apistatus.ServerInternal - - res := st.Message() - resv2 := apistatus.ErrorToV2(st).Message() - require.Empty(t, res) - require.Empty(t, resv2) - - st.SetMessage(msg) - - res = st.Message() - resv2 = apistatus.ErrorToV2(st).Message() - require.Equal(t, msg, res) - require.Equal(t, msg, resv2) +func TestInternalServerError_Error(t *testing.T) { + var e apistatus.InternalServerError + require.EqualError(t, e, "status: code = 1024 (internal server error)") + e = apistatus.NewInternalServerError(errors.New("some reason")) + require.EqualError(t, e, "status: code = 1024 (internal server error) message = some reason") } -func TestWrongMagicNumber_CorrectMagic(t *testing.T) { - const 
magic = 1337 - - var st apistatus.WrongMagicNumber - - res, ok := st.CorrectMagic() - require.Zero(t, res) - require.Zero(t, ok) - - st.WriteCorrectMagic(magic) - - res, ok = st.CorrectMagic() - require.EqualValues(t, magic, res) - require.EqualValues(t, 1, ok) - - // corrupt the value - apistatus.ErrorToV2(st).IterateDetails(func(d *status.Detail) bool { - d.SetValue([]byte{1, 2, 3}) // any slice with len != 8 - return true - }) - - _, ok = st.CorrectMagic() - require.EqualValues(t, -1, ok) +func TestInternalServerError_Is(t *testing.T) { + assertErrorIs(t, apistatus.ErrServerInternal) } -func TestSignatureVerification(t *testing.T) { - t.Run("default", func(t *testing.T) { - var st apistatus.SignatureVerification - - require.Empty(t, st.Message()) - }) - - t.Run("custom message", func(t *testing.T) { - var st apistatus.SignatureVerification - msg := "some message" - - st.SetMessage(msg) - - stV2 := st.ErrorToV2() - - require.Equal(t, msg, st.Message()) - require.Equal(t, msg, stV2.Message()) - }) - - t.Run("empty to V2", func(t *testing.T) { - var st apistatus.SignatureVerification - - stV2 := st.ErrorToV2() - - require.Equal(t, "signature verification failed", stV2.Message()) - }) - - t.Run("non-empty to V2", func(t *testing.T) { - var st apistatus.SignatureVerification - msg := "some other msg" - - st.SetMessage(msg) - - stV2 := st.ErrorToV2() +func TestInternalServerError_As(t *testing.T) { + var src, dst apistatus.InternalServerError + require.ErrorAs(t, src, &dst) + require.EqualValues(t, "", dst) + require.Equal(t, src.Error(), dst.Error()) + require.Equal(t, src.ErrorToV2(), dst.ErrorToV2()) + + src = apistatus.NewInternalServerError(errors.New("some reason")) + require.ErrorAs(t, src, &dst) + require.EqualValues(t, "some reason", dst) + require.Equal(t, src.Error(), dst.Error()) + require.Equal(t, src.ErrorToV2(), dst.ErrorToV2()) +} - require.Equal(t, msg, stV2.Message()) - }) +func TestInternalServerError_ErrorToV2(t *testing.T) { + var e 
apistatus.InternalServerError + st := e.ErrorToV2() + require.EqualValues(t, 1024, st.Code) + require.Zero(t, st.Message) + require.Zero(t, st.Details) + + e = apistatus.NewInternalServerError(errors.New("some reason")) + st = e.ErrorToV2() + require.EqualValues(t, 1024, st.Code) + require.Equal(t, "some reason", st.Message) + require.Zero(t, st.Details) } -func TestNodeUnderMaintenance(t *testing.T) { - t.Run("default", func(t *testing.T) { - var st apistatus.NodeUnderMaintenance +func TestWrongNetMagic_Error(t *testing.T) { + var e apistatus.WrongNetMagic + require.EqualError(t, e, "status: code = 1025 (wrong network magic)") + e = apistatus.NewWrongNetMagicError(4594136436) + require.EqualError(t, e, "status: code = 1025 (wrong network magic, expected 4594136436)") + + e2, err := apistatus.ErrorFromV2(&status.Status{Code: 1025, Message: "any message"}) + require.NoError(t, err) + require.ErrorAs(t, e2, &e) + require.EqualError(t, e, "status: code = 1025 (wrong network magic) message = any message") + + e2, err = apistatus.ErrorFromV2(&status.Status{Code: 1025, Message: "any message", Details: []*status.Status_Detail{{ + Id: 0, + Value: []byte{0, 0, 0, 1, 107, 18, 46, 11}, + }}}) + require.NoError(t, err) + require.ErrorAs(t, e2, &e) + require.EqualError(t, e, "status: code = 1025 (wrong network magic, expected 6091320843) message = any message") +} - require.Empty(t, st.Message()) - }) +func TestWrongNetMagic_Is(t *testing.T) { + assertErrorIs(t, apistatus.ErrWrongNetMagic) +} - t.Run("custom message", func(t *testing.T) { - var st apistatus.NodeUnderMaintenance - msg := "some message" +func TestWrongNetMagic_As(t *testing.T) { + var e, dst apistatus.WrongNetMagic + require.ErrorAs(t, e, &dst) + require.EqualValues(t, e.CorrectMagic(), dst.CorrectMagic()) + require.Equal(t, e.Error(), dst.Error()) + require.Equal(t, e.ErrorToV2(), dst.ErrorToV2()) + + e = apistatus.NewWrongNetMagicError(3254368) + require.ErrorAs(t, e, &dst) + require.EqualValues(t, 3254368, 
dst.CorrectMagic()) + require.Equal(t, e.Error(), dst.Error()) + require.Equal(t, e.ErrorToV2(), dst.ErrorToV2()) +} - st.SetMessage(msg) +func TestWrongNetMagic_ErrorToV2(t *testing.T) { + var e apistatus.WrongNetMagic + st := e.ErrorToV2() + require.EqualValues(t, 1025, st.Code) + require.Zero(t, st.Message) + require.Zero(t, st.Details) + + e = apistatus.NewWrongNetMagicError(6091320843) + st = e.ErrorToV2() + require.EqualValues(t, 1025, st.Code) + require.Zero(t, st.Message) + require.Equal(t, []*status.Status_Detail{{ + Id: 0, + Value: []byte{0, 0, 0, 1, 107, 18, 46, 11}, + }}, st.Details) +} - stV2 := st.ErrorToV2() +func TestSignatureVerificationFailure_Error(t *testing.T) { + var e apistatus.SignatureVerificationFailure + require.EqualError(t, e, "status: code = 1026 (signature verification failed)") + e = apistatus.NewSignatureVerificationFailure(errors.New("some reason")) + require.EqualError(t, e, "status: code = 1026 (signature verification failed) message = some reason") +} - require.Equal(t, msg, st.Message()) - require.Equal(t, msg, stV2.Message()) - }) +func TestSignatureVerificationFailure_Is(t *testing.T) { + assertErrorIs(t, apistatus.ErrSignatureVerification) +} - t.Run("empty to V2", func(t *testing.T) { - var st apistatus.NodeUnderMaintenance +func TestSignatureVerificationFailure_As(t *testing.T) { + var src, dst apistatus.SignatureVerificationFailure + require.ErrorAs(t, src, &dst) + require.EqualValues(t, "", dst) + require.Equal(t, src.Error(), dst.Error()) + require.Equal(t, src.ErrorToV2(), dst.ErrorToV2()) + src = apistatus.NewSignatureVerificationFailure(errors.New("some reason")) + require.ErrorAs(t, src, &dst) + require.EqualValues(t, "some reason", dst) + require.Equal(t, src.Error(), dst.Error()) + require.Equal(t, src.ErrorToV2(), dst.ErrorToV2()) +} - stV2 := st.ErrorToV2() +func TestSignatureVerificationFailure_ErrorToV2(t *testing.T) { + var e apistatus.SignatureVerificationFailure + st := e.ErrorToV2() + 
require.EqualValues(t, 1026, st.Code) + require.Zero(t, st.Message) + require.Zero(t, st.Details) + + e = apistatus.NewSignatureVerificationFailure(errors.New("some reason")) + st = e.ErrorToV2() + require.EqualValues(t, 1026, st.Code) + require.Equal(t, "some reason", st.Message) + require.Zero(t, st.Details) +} - require.Empty(t, "", stV2.Message()) - }) +func TestNodeUnderMaintenance_Error(t *testing.T) { + var e apistatus.NodeUnderMaintenance + require.EqualError(t, e, "status: code = 1027 (node is under maintenance)") - t.Run("non-empty to V2", func(t *testing.T) { - var st apistatus.NodeUnderMaintenance - msg := "some other msg" + e2, err := apistatus.ErrorFromV2(&status.Status{Code: 1027, Message: "any message"}) + require.NoError(t, err) + require.ErrorAs(t, e2, &e) + require.EqualError(t, e, "status: code = 1027 (node is under maintenance) message = any message") +} - st.SetMessage(msg) +func TestNodeUnderMaintenance_Is(t *testing.T) { + assertErrorIs(t, apistatus.ErrNodeUnderMaintenance) +} - stV2 := st.ErrorToV2() +func TestNodeUnderMaintenance_As(t *testing.T) { + var src, dst apistatus.NodeUnderMaintenance + require.ErrorAs(t, src, &dst) + require.Equal(t, src.Error(), dst.Error()) + require.Equal(t, src.ErrorToV2(), dst.ErrorToV2()) + + e, err := apistatus.ErrorFromV2(&status.Status{Code: 1027, Message: "any message"}) + require.NoError(t, err) + require.ErrorAs(t, e, &src) + require.ErrorAs(t, src, &dst) + require.Equal(t, src.Error(), dst.Error()) + require.Equal(t, src.ErrorToV2(), dst.ErrorToV2()) +} - require.Equal(t, msg, stV2.Message()) - }) +func TestNodeUnderMaintenance_ErrorToV2(t *testing.T) { + var e apistatus.NodeUnderMaintenance + st := e.ErrorToV2() + require.EqualValues(t, 1027, st.Code) + require.Zero(t, st.Message) + require.Zero(t, st.Details) + + e2, err := apistatus.ErrorFromV2(&status.Status{Code: 1027, Message: "any message"}) + require.NoError(t, err) + require.ErrorAs(t, e2, &e) + st = e.ErrorToV2() + require.EqualValues(t, 
1027, st.Code) + require.Equal(t, "any message", st.Message) + require.Zero(t, st.Details) } diff --git a/client/status/container.go b/client/status/container.go index 526d46fea..5bd5a6a06 100644 --- a/client/status/container.go +++ b/client/status/container.go @@ -2,111 +2,90 @@ package apistatus import ( "errors" + "fmt" - "github.com/nspcc-dev/neofs-api-go/v2/container" - "github.com/nspcc-dev/neofs-api-go/v2/status" + "github.com/nspcc-dev/neofs-sdk-go/api/status" ) +// Container error instances which may be used to check API errors against using +// [errors.Is]. All of them MUST NOT be changed. var ( - // ErrEACLNotFound is an instance of EACLNotFound error status. It's expected to be used for [errors.Is] - // and MUST NOT be changed. - ErrEACLNotFound EACLNotFound - // ErrContainerNotFound is an instance of ContainerNotFound error status. It's expected to be used for [errors.Is] - // and MUST NOT be changed. + ErrEACLNotFound EACLNotFound ErrContainerNotFound ContainerNotFound ) -// ContainerNotFound describes status of the failure because of the missing container. -// Instances provide [StatusV2] and error interfaces. -type ContainerNotFound struct { - v2 status.Status -} +// ContainerNotFound describes status of the failure because of the missing +// container. +type ContainerNotFound string -const defaultContainerNotFoundMsg = "container not found" +// NewContainerNotFoundError constructs missing container error with specified +// cause. +func NewContainerNotFoundError(cause error) ContainerNotFound { + return ContainerNotFound(cause.Error()) +} +// Error implements built-in error interface. 
func (x ContainerNotFound) Error() string { - msg := x.v2.Message() - if msg == "" { - msg = defaultContainerNotFoundMsg + const desc = "container not found" + if x != "" { + return fmt.Sprintf(errFmt, status.ContainerNotFound, desc, string(x)) } - - return errMessageStatusV2( - globalizeCodeV2(container.StatusNotFound, container.GlobalizeFail), - msg, - ) + return fmt.Sprintf(errFmtNoMessage, status.ContainerNotFound, desc) } -// Is implements interface for correct checking current error type with [errors.Is]. -func (x ContainerNotFound) Is(target error) bool { - switch target.(type) { - default: - return errors.Is(Error, target) - case ContainerNotFound, *ContainerNotFound: - return true - } -} +// Is checks whether target is of type ContainerNotFound, *ContainerNotFound or +// [Error]. Is implements interface consumed by [errors.Is]. +func (x ContainerNotFound) Is(target error) bool { return errorIs(x, target) } -// implements local interface defined in [ErrorFromV2] func. -func (x *ContainerNotFound) fromStatusV2(st *status.Status) { - x.v2 = *st +func (x *ContainerNotFound) readFromV2(m *status.Status) error { + if m.Code != status.ContainerNotFound { + panic(fmt.Sprintf("unexpected code %d instead of %d", m.Code, status.ContainerNotFound)) + } + if len(m.Details) > 0 { + return errors.New("details attached but not supported") + } + *x = ContainerNotFound(m.Message) + return nil } // ErrorToV2 implements [StatusV2] interface method. -// If the value was returned by [ErrorFromV2], returns the source message. -// Otherwise, returns message with -// - code: CONTAINER_NOT_FOUND; -// - string message: "container not found"; -// - details: empty. 
func (x ContainerNotFound) ErrorToV2() *status.Status { - x.v2.SetCode(globalizeCodeV2(container.StatusNotFound, container.GlobalizeFail)) - x.v2.SetMessage(defaultContainerNotFoundMsg) - return &x.v2 + return &status.Status{Code: status.ContainerNotFound, Message: string(x)} } -// EACLNotFound describes status of the failure because of the missing eACL -// table. -// Instances provide [StatusV2] and error interfaces. -type EACLNotFound struct { - v2 status.Status -} +// EACLNotFound describes status of the failure because of the missing eACL. +type EACLNotFound string -const defaultEACLNotFoundMsg = "eACL not found" +// NewEACLNotFoundError constructs missing eACL error with specified cause. +func NewEACLNotFoundError(cause error) EACLNotFound { + return EACLNotFound(cause.Error()) +} +// Error implements built-in error interface. func (x EACLNotFound) Error() string { - msg := x.v2.Message() - if msg == "" { - msg = defaultEACLNotFoundMsg + const desc = "eACL not found" + if x != "" { + return fmt.Sprintf(errFmt, status.EACLNotFound, desc, string(x)) } - - return errMessageStatusV2( - globalizeCodeV2(container.StatusEACLNotFound, container.GlobalizeFail), - msg, - ) + return fmt.Sprintf(errFmtNoMessage, status.EACLNotFound, desc) } -// Is implements interface for correct checking current error type with [errors.Is]. -func (x EACLNotFound) Is(target error) bool { - switch target.(type) { - default: - return errors.Is(Error, target) - case EACLNotFound, *EACLNotFound: - return true - } -} +// Is checks whether target is of type EACLNotFound, *EACLNotFound or [Error]. +// Is implements interface consumed by [errors.Is]. +func (x EACLNotFound) Is(target error) bool { return errorIs(x, target) } -// implements local interface defined in [ErrorFromV2] func. 
-func (x *EACLNotFound) fromStatusV2(st *status.Status) { - x.v2 = *st +func (x *EACLNotFound) readFromV2(m *status.Status) error { + if m.Code != status.EACLNotFound { + panic(fmt.Sprintf("unexpected code %d instead of %d", m.Code, status.EACLNotFound)) + } + if len(m.Details) > 0 { + return errors.New("details attached but not supported") + } + *x = EACLNotFound(m.Message) + return nil } // ErrorToV2 implements [StatusV2] interface method. -// If the value was returned by [ErrorFromV2], returns the source message. -// Otherwise, returns message with -// - code: EACL_NOT_FOUND; -// - string message: "eACL not found"; -// - details: empty. func (x EACLNotFound) ErrorToV2() *status.Status { - x.v2.SetCode(globalizeCodeV2(container.StatusEACLNotFound, container.GlobalizeFail)) - x.v2.SetMessage(defaultEACLNotFoundMsg) - return &x.v2 + return &status.Status{Code: status.EACLNotFound, Message: string(x)} } diff --git a/client/status/container_test.go b/client/status/container_test.go new file mode 100644 index 000000000..621d7b90f --- /dev/null +++ b/client/status/container_test.go @@ -0,0 +1,87 @@ +package apistatus_test + +import ( + "errors" + "testing" + + apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" + "github.com/stretchr/testify/require" +) + +func TestContainerNotFound_Error(t *testing.T) { + var e apistatus.ContainerNotFound + require.EqualError(t, e, "status: code = 3072 (container not found)") + e = apistatus.NewContainerNotFoundError(errors.New("some reason")) + require.EqualError(t, e, "status: code = 3072 (container not found) message = some reason") +} + +func TestContainerNotFound_Is(t *testing.T) { + assertErrorIs(t, apistatus.ErrContainerNotFound) +} + +func TestContainerNotFound_As(t *testing.T) { + var src, dst apistatus.ContainerNotFound + require.ErrorAs(t, src, &dst) + require.EqualValues(t, "", dst) + require.Equal(t, src.Error(), dst.Error()) + require.Equal(t, src.ErrorToV2(), dst.ErrorToV2()) + + src = 
apistatus.NewContainerNotFoundError(errors.New("some reason")) + require.ErrorAs(t, src, &dst) + require.EqualValues(t, "some reason", dst) + require.Equal(t, src.Error(), dst.Error()) + require.Equal(t, src.ErrorToV2(), dst.ErrorToV2()) +} + +func TestContainerNotFound_ErrorToV2(t *testing.T) { + var e apistatus.ContainerNotFound + st := e.ErrorToV2() + require.EqualValues(t, 3072, st.Code) + require.Zero(t, st.Message) + require.Zero(t, st.Details) + + e = apistatus.NewContainerNotFoundError(errors.New("some reason")) + st = e.ErrorToV2() + require.EqualValues(t, 3072, st.Code) + require.Equal(t, "some reason", st.Message) + require.Zero(t, st.Details) +} + +func TestEACLNotFound_Error(t *testing.T) { + var e apistatus.EACLNotFound + require.EqualError(t, e, "status: code = 3073 (eACL not found)") + e = apistatus.NewEACLNotFoundError(errors.New("some reason")) + require.EqualError(t, e, "status: code = 3073 (eACL not found) message = some reason") +} + +func TestEACLNotFound_Is(t *testing.T) { + assertErrorIs(t, apistatus.ErrEACLNotFound) +} + +func TestEACLNotFound_As(t *testing.T) { + var src, dst apistatus.EACLNotFound + require.ErrorAs(t, src, &dst) + require.EqualValues(t, "", dst) + require.Equal(t, src.Error(), dst.Error()) + require.Equal(t, src.ErrorToV2(), dst.ErrorToV2()) + + src = apistatus.NewEACLNotFoundError(errors.New("some reason")) + require.ErrorAs(t, src, &dst) + require.EqualValues(t, "some reason", dst) + require.Equal(t, src.Error(), dst.Error()) + require.Equal(t, src.ErrorToV2(), dst.ErrorToV2()) +} + +func TestEACLNotFound_ErrorToV2(t *testing.T) { + var e apistatus.EACLNotFound + st := e.ErrorToV2() + require.EqualValues(t, 3073, st.Code) + require.Zero(t, st.Message) + require.Zero(t, st.Details) + + e = apistatus.NewEACLNotFoundError(errors.New("some reason")) + st = e.ErrorToV2() + require.EqualValues(t, 3073, st.Code) + require.Equal(t, "some reason", st.Message) + require.Zero(t, st.Details) +} diff --git 
a/client/status/errors_test.go b/client/status/errors_test.go index 49dd53a81..b4fd92adf 100644 --- a/client/status/errors_test.go +++ b/client/status/errors_test.go @@ -1,93 +1,33 @@ -package apistatus +package apistatus_test import ( "fmt" "testing" + apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" "github.com/stretchr/testify/require" ) -func TestErrors(t *testing.T) { - for _, tc := range []struct { - errs []error - errVariable error - }{ - { - errs: []error{ServerInternal{}, new(ServerInternal)}, - errVariable: ErrServerInternal, - }, - { - errs: []error{WrongMagicNumber{}, new(WrongMagicNumber)}, - errVariable: ErrWrongMagicNumber, - }, - { - errs: []error{SignatureVerification{}, new(SignatureVerification)}, - errVariable: ErrSignatureVerification, - }, - { - errs: []error{NodeUnderMaintenance{}, new(NodeUnderMaintenance)}, - errVariable: ErrNodeUnderMaintenance, - }, - - { - errs: []error{ObjectLocked{}, new(ObjectLocked)}, - errVariable: ErrObjectLocked, - }, - { - errs: []error{LockNonRegularObject{}, new(LockNonRegularObject)}, - errVariable: ErrLockNonRegularObject, - }, - { - errs: []error{ObjectAccessDenied{}, new(ObjectAccessDenied)}, - errVariable: ErrObjectAccessDenied, - }, - { - errs: []error{ObjectNotFound{}, new(ObjectNotFound)}, - errVariable: ErrObjectNotFound, - }, - { - errs: []error{ObjectAlreadyRemoved{}, new(ObjectAlreadyRemoved)}, - errVariable: ErrObjectAlreadyRemoved, - }, - { - errs: []error{ObjectOutOfRange{}, new(ObjectOutOfRange)}, - errVariable: ErrObjectOutOfRange, - }, - - { - errs: []error{ContainerNotFound{}, new(ContainerNotFound)}, - errVariable: ErrContainerNotFound, - }, - { - errs: []error{EACLNotFound{}, new(EACLNotFound)}, - errVariable: ErrEACLNotFound, - }, - - { - errs: []error{SessionTokenExpired{}, new(SessionTokenExpired)}, - errVariable: ErrSessionTokenExpired, - }, - { - errs: []error{SessionTokenNotFound{}, new(SessionTokenNotFound)}, - errVariable: ErrSessionTokenNotFound, - }, - - { - errs: 
[]error{UnrecognizedStatusV2{}, new(UnrecognizedStatusV2)}, - errVariable: ErrUnrecognizedStatusV2, - }, - } { - require.NotEmpty(t, tc.errs) - require.NotNil(t, tc.errVariable) - - for i := range tc.errs { - require.ErrorIs(t, tc.errs[i], tc.errVariable) - - wrapped := fmt.Errorf("some message %w", tc.errs[i]) - require.ErrorIs(t, wrapped, tc.errVariable) - - wrappedTwice := fmt.Errorf("another message %w", wrapped) - require.ErrorIs(t, wrappedTwice, tc.errVariable) - } - } +func assertErrorIs[T error, PTR interface { + *T + error +}](t testing.TB, constErr T) { + var e T + var pe PTR = &e + require.ErrorIs(t, e, e) + require.ErrorIs(t, e, pe) + require.ErrorIs(t, pe, e) + require.ErrorIs(t, pe, pe) + require.ErrorIs(t, e, apistatus.Error) + require.ErrorIs(t, e, constErr) + we := fmt.Errorf("wrapped %w", e) + require.ErrorIs(t, we, e) + require.ErrorIs(t, we, pe) + require.ErrorIs(t, we, apistatus.Error) + require.ErrorIs(t, we, constErr) + wwe := fmt.Errorf("again %w", e) + require.ErrorIs(t, wwe, e) + require.ErrorIs(t, wwe, pe) + require.ErrorIs(t, wwe, apistatus.Error) + require.ErrorIs(t, wwe, constErr) } diff --git a/client/status/object.go b/client/status/object.go index 906cd8f60..5b420c17b 100644 --- a/client/status/object.go +++ b/client/status/object.go @@ -2,322 +2,257 @@ package apistatus import ( "errors" + "fmt" + "unicode/utf8" - "github.com/nspcc-dev/neofs-api-go/v2/object" - "github.com/nspcc-dev/neofs-api-go/v2/status" + "github.com/nspcc-dev/neofs-sdk-go/api/status" ) +// Object error instances which may be used to check API errors against using +// [errors.Is]. All of them MUST NOT be changed. var ( - // ErrObjectLocked is an instance of ObjectLocked error status. It's expected to be used for [errors.Is] - // and MUST NOT be changed. - ErrObjectLocked ObjectLocked - // ErrObjectAlreadyRemoved is an instance of ObjectAlreadyRemoved error status. It's expected to be used for [errors.Is] - // and MUST NOT be changed. 
+ ErrObjectAccessDenied ObjectAccessDenied + ErrObjectNotFound ObjectNotFound + ErrObjectLocked ObjectLocked + ErrLockIrregularObject LockIrregularObject ErrObjectAlreadyRemoved ObjectAlreadyRemoved - // ErrLockNonRegularObject is an instance of LockNonRegularObject error status. It's expected to be used for [errors.Is] - // and MUST NOT be changed. - ErrLockNonRegularObject LockNonRegularObject - // ErrObjectAccessDenied is an instance of ObjectAccessDenied error status. It's expected to be used for [errors.Is] - // and MUST NOT be changed. - ErrObjectAccessDenied ObjectAccessDenied - // ErrObjectNotFound is an instance of ObjectNotFound error status. It's expected to be used for [errors.Is] - // and MUST NOT be changed. - ErrObjectNotFound ObjectNotFound - // ErrObjectOutOfRange is an instance of ObjectOutOfRange error status. It's expected to be used for [errors.Is] - // and MUST NOT be changed. - ErrObjectOutOfRange ObjectOutOfRange + ErrObjectOutOfRange ObjectOutOfRange ) // ObjectLocked describes status of the failure because of the locked object. -// Instances provide [StatusV2] and error interfaces. -type ObjectLocked struct { - v2 status.Status -} - -const defaultObjectLockedMsg = "object is locked" +type ObjectLocked struct{ msg string } +// Error implements built-in error interface. func (x ObjectLocked) Error() string { - msg := x.v2.Message() - if msg == "" { - msg = defaultObjectLockedMsg + const desc = "object is locked" + if x.msg != "" { + return fmt.Sprintf(errFmt, status.ObjectLocked, desc, x.msg) } - - return errMessageStatusV2( - globalizeCodeV2(object.StatusLocked, object.GlobalizeFail), - msg, - ) + return fmt.Sprintf(errFmtNoMessage, status.ObjectLocked, desc) } -// Is implements interface for correct checking current error type with [errors.Is]. 
-func (x ObjectLocked) Is(target error) bool { - switch target.(type) { - default: - return errors.Is(Error, target) - case ObjectLocked, *ObjectLocked: - return true - } -} +// Is checks whether target is of type ObjectLocked, *ObjectLocked or [Error]. +// Is implements interface consumed by [errors.Is]. +func (x ObjectLocked) Is(target error) bool { return errorIs(x, target) } -// implements local interface defined in [ErrorFromV2] func. -func (x *ObjectLocked) fromStatusV2(st *status.Status) { - x.v2 = *st +func (x *ObjectLocked) readFromV2(m *status.Status) error { + if m.Code != status.ObjectLocked { + panic(fmt.Sprintf("unexpected code %d instead of %d", m.Code, status.ObjectLocked)) + } + if len(m.Details) > 0 { + return errors.New("details attached but not supported") + } + x.msg = m.Message + return nil } // ErrorToV2 implements [StatusV2] interface method. -// If the value was returned by [ErrorFromV2], returns the source message. -// Otherwise, returns message with -// - code: LOCKED; -// - string message: "object is locked"; -// - details: empty. func (x ObjectLocked) ErrorToV2() *status.Status { - x.v2.SetCode(globalizeCodeV2(object.StatusLocked, object.GlobalizeFail)) - x.v2.SetMessage(defaultObjectLockedMsg) - return &x.v2 -} - -// LockNonRegularObject describes status returned on locking the non-regular object. -// Instances provide [StatusV2] and error interfaces. -type LockNonRegularObject struct { - v2 status.Status + return &status.Status{Code: status.ObjectLocked, Message: x.msg} } -const defaultLockNonRegularObjectMsg = "locking non-regular object is forbidden" +// LockIrregularObject describes status returned on locking the irregular +// object. +type LockIrregularObject struct{ msg string } -func (x LockNonRegularObject) Error() string { - msg := x.v2.Message() - if msg == "" { - msg = defaultLockNonRegularObjectMsg +// Error implements built-in error interface. 
+func (x LockIrregularObject) Error() string { + const desc = "locking irregular object is forbidden" + if x.msg != "" { + return fmt.Sprintf(errFmt, status.LockIrregularObject, desc, x.msg) } - - return errMessageStatusV2( - globalizeCodeV2(object.StatusLockNonRegularObject, object.GlobalizeFail), - msg, - ) + return fmt.Sprintf(errFmtNoMessage, status.LockIrregularObject, desc) } -// Is implements interface for correct checking current error type with [errors.Is]. -func (x LockNonRegularObject) Is(target error) bool { - switch target.(type) { - default: - return errors.Is(Error, target) - case LockNonRegularObject, *LockNonRegularObject: - return true - } -} +// Is checks whether target is of type LockIrregularObject, *LockIrregularObject +// or [Error]. Is implements interface consumed by [errors.Is]. +func (x LockIrregularObject) Is(target error) bool { return errorIs(x, target) } -// implements local interface defined in [ErrorFromV2] func. -func (x *LockNonRegularObject) fromStatusV2(st *status.Status) { - x.v2 = *st +func (x *LockIrregularObject) readFromV2(m *status.Status) error { + if m.Code != status.LockIrregularObject { + panic(fmt.Sprintf("unexpected code %d instead of %d", m.Code, status.LockIrregularObject)) + } + if len(m.Details) > 0 { + return errors.New("details attached but not supported") + } + x.msg = m.Message + return nil } // ErrorToV2 implements [StatusV2] interface method. -// If the value was returned by [ErrorFromV2], returns the source message. -// Otherwise, returns message with -// - code: LOCK_NON_REGULAR_OBJECT; -// - string message: "locking non-regular object is forbidden"; -// - details: empty. 
-func (x LockNonRegularObject) ErrorToV2() *status.Status { - x.v2.SetCode(globalizeCodeV2(object.StatusLockNonRegularObject, object.GlobalizeFail)) - x.v2.SetMessage(defaultLockNonRegularObjectMsg) - return &x.v2 +func (x LockIrregularObject) ErrorToV2() *status.Status { + return &status.Status{Code: status.LockIrregularObject, Message: x.msg} } -// ObjectAccessDenied describes status of the failure because of the access control violation. -// Instances provide [StatusV2] and error interfaces. -type ObjectAccessDenied struct { - v2 status.Status -} +// ObjectAccessDenied describes status of the failure because of the access +// control violation. +type ObjectAccessDenied struct{ reason, msg string } -const defaultObjectAccessDeniedMsg = "access to object operation denied" +// NewObjectAccessDeniedError constructs object access denial error indicating +// the reason. +func NewObjectAccessDeniedError(reason string) ObjectAccessDenied { + return ObjectAccessDenied{reason: reason} +} +// Error implements built-in error interface. func (x ObjectAccessDenied) Error() string { - msg := x.v2.Message() - if msg == "" { - msg = defaultObjectAccessDeniedMsg + const desc = "object access denied" + if x.msg != "" { + if x.reason != "" { + return fmt.Sprintf(errFmt, status.ObjectAccessDenied, fmt.Sprintf("%s, reason: %s", desc, x.reason), x.msg) + } + return fmt.Sprintf(errFmt, status.ObjectAccessDenied, desc, x.msg) } - - return errMessageStatusV2( - globalizeCodeV2(object.StatusAccessDenied, object.GlobalizeFail), - msg, - ) -} - -// Is implements interface for correct checking current error type with [errors.Is]. 
-func (x ObjectAccessDenied) Is(target error) bool { - switch target.(type) { - default: - return errors.Is(Error, target) - case ObjectAccessDenied, *ObjectAccessDenied: - return true + if x.reason != "" { + return fmt.Sprintf(errFmtNoMessage, status.ObjectAccessDenied, fmt.Sprintf("%s, reason: %s", desc, x.reason)) } + return fmt.Sprintf(errFmtNoMessage, status.ObjectAccessDenied, desc) } -// implements local interface defined in [ErrorFromV2] func. -func (x *ObjectAccessDenied) fromStatusV2(st *status.Status) { - x.v2 = *st +// Is checks whether target is of type ObjectAccessDenied, *ObjectAccessDenied +// or [Error]. Is implements interface consumed by [errors.Is]. +func (x ObjectAccessDenied) Is(target error) bool { return errorIs(x, target) } + +func (x *ObjectAccessDenied) readFromV2(m *status.Status) error { + if m.Code != status.ObjectAccessDenied { + panic(fmt.Sprintf("unexpected code %d instead of %d", m.Code, status.ObjectAccessDenied)) + } + if len(m.Details) > 0 { + if len(m.Details) > 1 { + return fmt.Errorf("too many details (%d)", len(m.Details)) + } + if m.Details[0].Id != status.DetailObjectAccessDenialReason { + return fmt.Errorf("unsupported detail ID=%d", m.Details[0].Id) + } + if !utf8.Valid(m.Details[0].Value) { + return errors.New("invalid reason detail: invalid UTF-8 string") + } + x.reason = string(m.Details[0].Value) + } else { + x.reason = "" + } + x.msg = m.Message + return nil } // ErrorToV2 implements [StatusV2] interface method. -// If the value was returned by [ErrorFromV2], returns the source message. -// Otherwise, returns message with -// - code: ACCESS_DENIED; -// - string message: "access to object operation denied"; -// - details: empty. func (x ObjectAccessDenied) ErrorToV2() *status.Status { - x.v2.SetCode(globalizeCodeV2(object.StatusAccessDenied, object.GlobalizeFail)) - x.v2.SetMessage(defaultObjectAccessDeniedMsg) - return &x.v2 -} - -// WriteReason writes human-readable access rejection reason. 
-func (x *ObjectAccessDenied) WriteReason(reason string) { - object.WriteAccessDeniedDesc(&x.v2, reason) + st := status.Status{Code: status.ObjectAccessDenied, Message: x.msg} + if x.reason != "" { + st.Details = []*status.Status_Detail{{ + Id: status.DetailObjectAccessDenialReason, + Value: []byte(x.reason), + }} + } + return &st } // Reason returns human-readable access rejection reason returned by the server. // Returns empty value is reason is not presented. func (x ObjectAccessDenied) Reason() string { - return object.ReadAccessDeniedDesc(x.v2) + return x.reason } // ObjectNotFound describes status of the failure because of the missing object. -// Instances provide [StatusV2] and error interfaces. -type ObjectNotFound struct { - v2 status.Status -} +type ObjectNotFound string -const defaultObjectNotFoundMsg = "object not found" +// NewObjectNotFoundError constructs missing object error with specified cause. +func NewObjectNotFoundError(cause error) ObjectNotFound { + return ObjectNotFound(cause.Error()) +} +// Error implements built-in error interface. func (x ObjectNotFound) Error() string { - msg := x.v2.Message() - if msg == "" { - msg = defaultObjectNotFoundMsg + const desc = "object not found" + if x != "" { + return fmt.Sprintf(errFmt, status.ObjectNotFound, desc, string(x)) } - - return errMessageStatusV2( - globalizeCodeV2(object.StatusNotFound, object.GlobalizeFail), - msg, - ) + return fmt.Sprintf(errFmtNoMessage, status.ObjectNotFound, desc) } -// Is implements interface for correct checking current error type with [errors.Is]. -func (x ObjectNotFound) Is(target error) bool { - switch target.(type) { - default: - return errors.Is(Error, target) - case ObjectNotFound, *ObjectNotFound: - return true - } -} +// Is checks whether target is of type ObjectNotFound, *ObjectNotFound or +// [Error]. Is implements interface consumed by [errors.Is]. 
+func (x ObjectNotFound) Is(target error) bool { return errorIs(x, target) } -// implements local interface defined in [ErrorFromV2] func. -func (x *ObjectNotFound) fromStatusV2(st *status.Status) { - x.v2 = *st +func (x *ObjectNotFound) readFromV2(m *status.Status) error { + if m.Code != status.ObjectNotFound { + panic(fmt.Sprintf("unexpected code %d instead of %d", m.Code, status.ObjectNotFound)) + } + if len(m.Details) > 0 { + return errors.New("details attached but not supported") + } + *x = ObjectNotFound(m.Message) + return nil } // ErrorToV2 implements [StatusV2] interface method. -// If the value was returned by [ErrorFromV2], returns the source message. -// Otherwise, returns message with -// - code: OBJECT_NOT_FOUND; -// - string message: "object not found"; -// - details: empty. func (x ObjectNotFound) ErrorToV2() *status.Status { - x.v2.SetCode(globalizeCodeV2(object.StatusNotFound, object.GlobalizeFail)) - x.v2.SetMessage(defaultObjectNotFoundMsg) - return &x.v2 + return &status.Status{Code: status.ObjectNotFound, Message: string(x)} } // ObjectAlreadyRemoved describes status of the failure because object has been -// already removed. Instances provide Status and StatusV2 interfaces. -type ObjectAlreadyRemoved struct { - v2 status.Status -} - -const defaultObjectAlreadyRemovedMsg = "object already removed" +// already removed. +type ObjectAlreadyRemoved struct{ msg string } +// Error implements built-in error interface. func (x ObjectAlreadyRemoved) Error() string { - msg := x.v2.Message() - if msg == "" { - msg = defaultObjectAlreadyRemovedMsg + const desc = "object already removed" + if x.msg != "" { + return fmt.Sprintf(errFmt, status.ObjectAlreadyRemoved, desc, x.msg) } - - return errMessageStatusV2( - globalizeCodeV2(object.StatusAlreadyRemoved, object.GlobalizeFail), - msg, - ) + return fmt.Sprintf(errFmtNoMessage, status.ObjectAlreadyRemoved, desc) } -// Is implements interface for correct checking current error type with [errors.Is]. 
-func (x ObjectAlreadyRemoved) Is(target error) bool { - switch target.(type) { - default: - return errors.Is(Error, target) - case ObjectAlreadyRemoved, *ObjectAlreadyRemoved: - return true - } -} +// Is checks whether target is of type ObjectAlreadyRemoved, +// *ObjectAlreadyRemoved or [Error]. Is implements interface consumed by +// [errors.Is]. +func (x ObjectAlreadyRemoved) Is(target error) bool { return errorIs(x, target) } -// implements local interface defined in [ErrorFromV2] func. -func (x *ObjectAlreadyRemoved) fromStatusV2(st *status.Status) { - x.v2 = *st +func (x *ObjectAlreadyRemoved) readFromV2(m *status.Status) error { + if m.Code != status.ObjectAlreadyRemoved { + panic(fmt.Sprintf("unexpected code %d instead of %d", m.Code, status.ObjectAlreadyRemoved)) + } + if len(m.Details) > 0 { + return errors.New("details attached but not supported") + } + x.msg = m.Message + return nil } // ErrorToV2 implements [StatusV2] interface method. -// If the value was returned by [ErrorFromV2], returns the source message. -// Otherwise, returns message with -// - code: OBJECT_ALREADY_REMOVED; -// - string message: "object already removed"; -// - details: empty. func (x ObjectAlreadyRemoved) ErrorToV2() *status.Status { - x.v2.SetCode(globalizeCodeV2(object.StatusAlreadyRemoved, object.GlobalizeFail)) - x.v2.SetMessage(defaultObjectAlreadyRemovedMsg) - return &x.v2 + return &status.Status{Code: status.ObjectAlreadyRemoved, Message: x.msg} } // ObjectOutOfRange describes status of the failure because of the incorrect // provided object ranges. -// Instances provide [StatusV2] and error interfaces. -type ObjectOutOfRange struct { - v2 status.Status -} - -const defaultObjectOutOfRangeMsg = "out of range" +type ObjectOutOfRange struct{ msg string } +// Error implements built-in error interface. 
func (x ObjectOutOfRange) Error() string { - msg := x.v2.Message() - if msg == "" { - msg = defaultObjectOutOfRangeMsg + const desc = "out of range" + if x.msg != "" { + return fmt.Sprintf(errFmt, status.OutOfRange, desc, x.msg) } - - return errMessageStatusV2( - globalizeCodeV2(object.StatusOutOfRange, object.GlobalizeFail), - msg, - ) + return fmt.Sprintf(errFmtNoMessage, status.OutOfRange, desc) } -// Is implements interface for correct checking current error type with [errors.Is]. -func (x ObjectOutOfRange) Is(target error) bool { - switch target.(type) { - default: - return errors.Is(Error, target) - case ObjectOutOfRange, *ObjectOutOfRange: - return true - } -} +// Is checks whether target is of type ObjectOutOfRange, *ObjectOutOfRange or +// [Error]. Is implements interface consumed by [errors.Is]. +func (x ObjectOutOfRange) Is(target error) bool { return errorIs(x, target) } -// implements local interface defined in [ErrorFromV2] func. -func (x *ObjectOutOfRange) fromStatusV2(st *status.Status) { - x.v2 = *st +func (x *ObjectOutOfRange) readFromV2(m *status.Status) error { + if m.Code != status.OutOfRange { + panic(fmt.Sprintf("unexpected code %d instead of %d", m.Code, status.OutOfRange)) + } + if len(m.Details) > 0 { + return errors.New("details attached but not supported") + } + x.msg = m.Message + return nil } // ErrorToV2 implements [StatusV2] interface method. -// If the value was returned by [ErrorFromV2], returns the source message. -// Otherwise, returns message with -// - code: OUT_OF_RANGE; -// - string message: "out of range"; -// - details: empty. 
func (x ObjectOutOfRange) ErrorToV2() *status.Status { - x.v2.SetCode(globalizeCodeV2(object.StatusOutOfRange, object.GlobalizeFail)) - x.v2.SetMessage(defaultObjectOutOfRangeMsg) - return &x.v2 + return &status.Status{Code: status.OutOfRange, Message: x.msg} } diff --git a/client/status/object_test.go b/client/status/object_test.go index 126588a82..0de928f41 100644 --- a/client/status/object_test.go +++ b/client/status/object_test.go @@ -1,26 +1,280 @@ package apistatus_test import ( + "errors" "testing" + "github.com/nspcc-dev/neofs-sdk-go/api/status" apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" "github.com/stretchr/testify/require" ) -func TestObjectAccessDenied_WriteReason(t *testing.T) { - const reason = "any reason" +func TestObjectLocked_Error(t *testing.T) { + var e apistatus.ObjectLocked + require.EqualError(t, e, "status: code = 2050 (object is locked)") - var st apistatus.ObjectAccessDenied + e2, err := apistatus.ErrorFromV2(&status.Status{Code: 2050, Message: "any message"}) + require.NoError(t, err) + require.ErrorAs(t, e2, &e) + require.EqualError(t, e, "status: code = 2050 (object is locked) message = any message") +} + +func TestObjectLocked_Is(t *testing.T) { + assertErrorIs(t, apistatus.ErrObjectLocked) +} + +func TestObjectLocked_As(t *testing.T) { + var src, dst apistatus.ObjectLocked + require.ErrorAs(t, src, &dst) + require.Equal(t, src.Error(), dst.Error()) + require.Equal(t, src.ErrorToV2(), dst.ErrorToV2()) + + e, err := apistatus.ErrorFromV2(&status.Status{Code: 2050, Message: "any message"}) + require.NoError(t, err) + require.ErrorAs(t, e, &src) + require.ErrorAs(t, src, &dst) + require.Equal(t, src.Error(), dst.Error()) + require.Equal(t, src.ErrorToV2(), dst.ErrorToV2()) +} + +func TestObjectLocked_ErrorToV2(t *testing.T) { + var e apistatus.ObjectLocked + st := e.ErrorToV2() + require.EqualValues(t, 2050, st.Code) + require.Zero(t, st.Message) + require.Zero(t, st.Details) + + e2, err := 
apistatus.ErrorFromV2(&status.Status{Code: 2050, Message: "any message"}) + require.NoError(t, err) + require.ErrorAs(t, e2, &e) + st = e.ErrorToV2() + require.EqualValues(t, 2050, st.Code) + require.Equal(t, "any message", st.Message) + require.Zero(t, st.Details) +} + +func TestLockIrregularObject_Error(t *testing.T) { + var e apistatus.LockIrregularObject + require.EqualError(t, e, "status: code = 2051 (locking irregular object is forbidden)") + + e2, err := apistatus.ErrorFromV2(&status.Status{Code: 2051, Message: "any message"}) + require.NoError(t, err) + require.ErrorAs(t, e2, &e) + require.EqualError(t, e, "status: code = 2051 (locking irregular object is forbidden) message = any message") +} + +func TestLockIrregularObject_Is(t *testing.T) { + assertErrorIs(t, apistatus.ErrLockIrregularObject) +} + +func TestLockIrregularObject_As(t *testing.T) { + var src, dst apistatus.LockIrregularObject + require.ErrorAs(t, src, &dst) + require.Equal(t, src.Error(), dst.Error()) + require.Equal(t, src.ErrorToV2(), dst.ErrorToV2()) + + e, err := apistatus.ErrorFromV2(&status.Status{Code: 2051, Message: "any message"}) + require.NoError(t, err) + require.ErrorAs(t, e, &src) + require.ErrorAs(t, src, &dst) + require.Equal(t, src.Error(), dst.Error()) + require.Equal(t, src.ErrorToV2(), dst.ErrorToV2()) +} + +func TestLockIrregularObject_ErrorToV2(t *testing.T) { + var e apistatus.LockIrregularObject + st := e.ErrorToV2() + require.EqualValues(t, 2051, st.Code) + require.Zero(t, st.Message) + require.Zero(t, st.Details) + + e2, err := apistatus.ErrorFromV2(&status.Status{Code: 2051, Message: "any message"}) + require.NoError(t, err) + require.ErrorAs(t, e2, &e) + st = e.ErrorToV2() + require.EqualValues(t, 2051, st.Code) + require.Equal(t, "any message", st.Message) + require.Zero(t, st.Details) +} + +func TestObjectAccessDenied_Error(t *testing.T) { + var e apistatus.ObjectAccessDenied + require.EqualError(t, e, "status: code = 2048 (object access denied)") + e = 
apistatus.NewObjectAccessDeniedError("some reason") + require.EqualError(t, e, "status: code = 2048 (object access denied, reason: some reason)") + + e2, err := apistatus.ErrorFromV2(&status.Status{Code: 2048, Message: "any message"}) + require.NoError(t, err) + require.ErrorAs(t, e2, &e) + require.EqualError(t, e, "status: code = 2048 (object access denied) message = any message") + + e2, err = apistatus.ErrorFromV2(&status.Status{Code: 2048, Message: "any message", Details: []*status.Status_Detail{{ + Id: 0, + Value: []byte("some reason"), + }}}) + require.NoError(t, err) + require.ErrorAs(t, e2, &e) + require.EqualError(t, e, "status: code = 2048 (object access denied, reason: some reason) message = any message") +} + +func TestObjectAccessDenied_Is(t *testing.T) { + assertErrorIs(t, apistatus.ErrObjectAccessDenied) +} + +func TestObjectAccessDenied_As(t *testing.T) { + var e, dst apistatus.ObjectAccessDenied + require.ErrorAs(t, e, &dst) + require.EqualValues(t, e.Reason(), dst.Reason()) + require.Equal(t, e.Error(), dst.Error()) + require.Equal(t, e.ErrorToV2(), dst.ErrorToV2()) + + e = apistatus.NewObjectAccessDeniedError("some reason") + require.ErrorAs(t, e, &dst) + require.EqualValues(t, "some reason", dst.Reason()) + require.Equal(t, e.Error(), dst.Error()) + require.Equal(t, e.ErrorToV2(), dst.ErrorToV2()) +} + +func TestObjectAccessDenied_ErrorToV2(t *testing.T) { + var e apistatus.ObjectAccessDenied + st := e.ErrorToV2() + require.EqualValues(t, 2048, st.Code) + require.Zero(t, st.Message) + require.Zero(t, st.Details) - res := st.Reason() - require.Empty(t, res) - detailNum := apistatus.ErrorToV2(st).NumberOfDetails() - require.Zero(t, detailNum) + e = apistatus.NewObjectAccessDeniedError("some reason") + st = e.ErrorToV2() + require.EqualValues(t, 2048, st.Code) + require.Zero(t, st.Message) + require.Equal(t, []*status.Status_Detail{{ + Id: 0, + Value: []byte("some reason"), + }}, st.Details) +} + +func TestObjectNotFound_Error(t *testing.T) { + var 
e apistatus.ObjectNotFound + require.EqualError(t, e, "status: code = 2049 (object not found)") + e = apistatus.NewObjectNotFoundError(errors.New("some reason")) + require.EqualError(t, e, "status: code = 2049 (object not found) message = some reason") +} + +func TestObjectNotFound_Is(t *testing.T) { + assertErrorIs(t, apistatus.ErrObjectNotFound) +} + +func TestObjectNotFound_As(t *testing.T) { + var src, dst apistatus.ObjectNotFound + require.ErrorAs(t, src, &dst) + require.EqualValues(t, "", dst) + require.Equal(t, src.Error(), dst.Error()) + require.Equal(t, src.ErrorToV2(), dst.ErrorToV2()) + + src = apistatus.NewObjectNotFoundError(errors.New("some reason")) + require.ErrorAs(t, src, &dst) + require.EqualValues(t, "some reason", dst) + require.Equal(t, src.Error(), dst.Error()) + require.Equal(t, src.ErrorToV2(), dst.ErrorToV2()) +} + +func TestObjectNotFound_ErrorToV2(t *testing.T) { + var e apistatus.ObjectNotFound + st := e.ErrorToV2() + require.EqualValues(t, 2049, st.Code) + require.Zero(t, st.Message) + require.Zero(t, st.Details) + + e = apistatus.NewObjectNotFoundError(errors.New("some reason")) + st = e.ErrorToV2() + require.EqualValues(t, 2049, st.Code) + require.Equal(t, "some reason", st.Message) + require.Zero(t, st.Details) +} + +func TestObjectAlreadyRemoved_Error(t *testing.T) { + var e apistatus.ObjectAlreadyRemoved + require.EqualError(t, e, "status: code = 2052 (object already removed)") + + e2, err := apistatus.ErrorFromV2(&status.Status{Code: 2052, Message: "any message"}) + require.NoError(t, err) + require.ErrorAs(t, e2, &e) + require.EqualError(t, e, "status: code = 2052 (object already removed) message = any message") +} + +func TestObjectAlreadyRemoved_Is(t *testing.T) { + assertErrorIs(t, apistatus.ErrObjectAlreadyRemoved) +} + +func TestObjectAlreadyRemoved_As(t *testing.T) { + var src, dst apistatus.ObjectAlreadyRemoved + require.ErrorAs(t, src, &dst) + require.Equal(t, src.Error(), dst.Error()) + require.Equal(t, src.ErrorToV2(), 
dst.ErrorToV2()) + + e, err := apistatus.ErrorFromV2(&status.Status{Code: 2052, Message: "any message"}) + require.NoError(t, err) + require.ErrorAs(t, e, &src) + require.ErrorAs(t, src, &dst) + require.Equal(t, src.Error(), dst.Error()) + require.Equal(t, src.ErrorToV2(), dst.ErrorToV2()) +} + +func TestObjectAlreadyRemoved_ErrorToV2(t *testing.T) { + var e apistatus.ObjectAlreadyRemoved + st := e.ErrorToV2() + require.EqualValues(t, 2052, st.Code) + require.Zero(t, st.Message) + require.Zero(t, st.Details) + + e2, err := apistatus.ErrorFromV2(&status.Status{Code: 2052, Message: "any message"}) + require.NoError(t, err) + require.ErrorAs(t, e2, &e) + st = e.ErrorToV2() + require.EqualValues(t, 2052, st.Code) + require.Equal(t, "any message", st.Message) + require.Zero(t, st.Details) +} + +func TestObjectOutOfRange_Error(t *testing.T) { + var e apistatus.ObjectOutOfRange + require.EqualError(t, e, "status: code = 2053 (out of range)") + + e2, err := apistatus.ErrorFromV2(&status.Status{Code: 2053, Message: "any message"}) + require.NoError(t, err) + require.ErrorAs(t, e2, &e) + require.EqualError(t, e, "status: code = 2053 (out of range) message = any message") +} + +func TestObjectOutOfRange_Is(t *testing.T) { + assertErrorIs(t, apistatus.ErrObjectOutOfRange) +} + +func TestObjectOutOfRange_As(t *testing.T) { + var src, dst apistatus.ObjectOutOfRange + require.ErrorAs(t, src, &dst) + require.Equal(t, src.Error(), dst.Error()) + require.Equal(t, src.ErrorToV2(), dst.ErrorToV2()) + + e, err := apistatus.ErrorFromV2(&status.Status{Code: 2053, Message: "any message"}) + require.NoError(t, err) + require.ErrorAs(t, e, &src) + require.ErrorAs(t, src, &dst) + require.Equal(t, src.Error(), dst.Error()) + require.Equal(t, src.ErrorToV2(), dst.ErrorToV2()) +} - st.WriteReason(reason) +func TestObjectOutOfRange_ErrorToV2(t *testing.T) { + var e apistatus.ObjectOutOfRange + st := e.ErrorToV2() + require.EqualValues(t, 2053, st.Code) + require.Zero(t, st.Message) + 
require.Zero(t, st.Details) - res = st.Reason() - require.Equal(t, reason, res) - detailNum = apistatus.ErrorToV2(st).NumberOfDetails() - require.EqualValues(t, 1, detailNum) + e2, err := apistatus.ErrorFromV2(&status.Status{Code: 2053, Message: "any message"}) + require.NoError(t, err) + require.ErrorAs(t, e2, &e) + st = e.ErrorToV2() + require.EqualValues(t, 2053, st.Code) + require.Equal(t, "any message", st.Message) + require.Zero(t, st.Details) } diff --git a/client/status/session.go b/client/status/session.go index 6fc470df9..24ad897bb 100644 --- a/client/status/session.go +++ b/client/status/session.go @@ -2,110 +2,80 @@ package apistatus import ( "errors" + "fmt" - "github.com/nspcc-dev/neofs-api-go/v2/session" - "github.com/nspcc-dev/neofs-api-go/v2/status" + "github.com/nspcc-dev/neofs-sdk-go/api/status" ) +// Session error instances which may be used to check API errors against using +// [errors.Is]. All of them MUST NOT be changed. var ( - // ErrSessionTokenNotFound is an instance of SessionTokenNotFound error status. It's expected to be used for [errors.Is] - // and MUST NOT be changed. ErrSessionTokenNotFound SessionTokenNotFound - // ErrSessionTokenExpired is an instance of SessionTokenExpired error status. It's expected to be used for [errors.Is] - // and MUST NOT be changed. - ErrSessionTokenExpired SessionTokenExpired + ErrSessionTokenExpired SessionTokenExpired ) // SessionTokenNotFound describes status of the failure because of the missing session token. -// Instances provide [StatusV2] and error interfaces. -type SessionTokenNotFound struct { - v2 status.Status -} - -const defaultSessionTokenNotFoundMsg = "session token not found" +type SessionTokenNotFound struct{ msg string } +// Error implements built-in error interface. 
func (x SessionTokenNotFound) Error() string { - msg := x.v2.Message() - if msg == "" { - msg = defaultSessionTokenNotFoundMsg + const desc = "session token not found" + if x.msg != "" { + return fmt.Sprintf(errFmt, status.SessionTokenNotFound, desc, x.msg) } - - return errMessageStatusV2( - globalizeCodeV2(session.StatusTokenNotFound, session.GlobalizeFail), - msg, - ) + return fmt.Sprintf(errFmtNoMessage, status.SessionTokenNotFound, desc) } -// Is implements interface for correct checking current error type with [errors.Is]. -func (x SessionTokenNotFound) Is(target error) bool { - switch target.(type) { - default: - return errors.Is(Error, target) - case SessionTokenNotFound, *SessionTokenNotFound: - return true - } -} +// Is checks whether target is of type SessionTokenNotFound, +// *SessionTokenNotFound or [Error]. Is implements interface consumed by +// [errors.Is]. +func (x SessionTokenNotFound) Is(target error) bool { return errorIs(x, target) } -// implements local interface defined in [ErrorFromV2] func. -func (x *SessionTokenNotFound) fromStatusV2(st *status.Status) { - x.v2 = *st +func (x *SessionTokenNotFound) readFromV2(m *status.Status) error { + if m.Code != status.SessionTokenNotFound { + panic(fmt.Sprintf("unexpected code %d instead of %d", m.Code, status.SessionTokenNotFound)) + } + if len(m.Details) > 0 { + return errors.New("details attached but not supported") + } + x.msg = m.Message + return nil } // ErrorToV2 implements [StatusV2] interface method. -// If the value was returned by [ErrorFromV2], returns the source message. -// Otherwise, returns message with -// - code: TOKEN_NOT_FOUND; -// - string message: "session token not found"; -// - details: empty. 
func (x SessionTokenNotFound) ErrorToV2() *status.Status { - x.v2.SetCode(globalizeCodeV2(session.StatusTokenNotFound, session.GlobalizeFail)) - x.v2.SetMessage(defaultSessionTokenNotFoundMsg) - return &x.v2 + return &status.Status{Code: status.SessionTokenNotFound, Message: x.msg} } -// SessionTokenExpired describes status of the failure because of the expired session token. -// Instances provide [StatusV2] and error interfaces. -type SessionTokenExpired struct { - v2 status.Status -} - -const defaultSessionTokenExpiredMsg = "expired session token" +// SessionTokenExpired describes status of the failure because of the expired +// session token. +type SessionTokenExpired struct{ msg string } +// Error implements built-in error interface. func (x SessionTokenExpired) Error() string { - msg := x.v2.Message() - if msg == "" { - msg = defaultSessionTokenExpiredMsg + const desc = "session token has expired" + if x.msg != "" { + return fmt.Sprintf(errFmt, status.SessionTokenExpired, desc, x.msg) } - - return errMessageStatusV2( - globalizeCodeV2(session.StatusTokenExpired, session.GlobalizeFail), - msg, - ) + return fmt.Sprintf(errFmtNoMessage, status.SessionTokenExpired, desc) } -// Is implements interface for correct checking current error type with [errors.Is]. -func (x SessionTokenExpired) Is(target error) bool { - switch target.(type) { - default: - return errors.Is(Error, target) - case SessionTokenExpired, *SessionTokenExpired: - return true - } -} +// Is checks whether target is of type SessionTokenExpired, *SessionTokenExpired +// or [Error]. Is implements interface consumed by [errors.Is]. +func (x SessionTokenExpired) Is(target error) bool { return errorIs(x, target) } -// implements local interface defined in [ErrorFromV2] func. 
-func (x *SessionTokenExpired) fromStatusV2(st *status.Status) { - x.v2 = *st +func (x *SessionTokenExpired) readFromV2(m *status.Status) error { + if m.Code != status.SessionTokenExpired { + panic(fmt.Sprintf("unexpected code %d instead of %d", m.Code, status.SessionTokenExpired)) + } + if len(m.Details) > 0 { + return errors.New("details attached but not supported") + } + x.msg = m.Message + return nil } // ErrorToV2 implements [StatusV2] interface method. -// If the value was returned by [ErrorFromV2], returns the source message. -// Otherwise, returns message with -// - code: TOKEN_EXPIRED; -// - string message: "expired session token"; -// - details: empty. func (x SessionTokenExpired) ErrorToV2() *status.Status { - x.v2.SetCode(globalizeCodeV2(session.StatusTokenExpired, session.GlobalizeFail)) - x.v2.SetMessage(defaultSessionTokenExpiredMsg) - return &x.v2 + return &status.Status{Code: status.SessionTokenExpired, Message: x.msg} } diff --git a/client/status/session_test.go b/client/status/session_test.go new file mode 100644 index 000000000..01af8f8f0 --- /dev/null +++ b/client/status/session_test.go @@ -0,0 +1,97 @@ +package apistatus_test + +import ( + "testing" + + "github.com/nspcc-dev/neofs-sdk-go/api/status" + apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" + "github.com/stretchr/testify/require" +) + +func TestSessionTokenNotFound_Error(t *testing.T) { + var e apistatus.SessionTokenNotFound + require.EqualError(t, e, "status: code = 4096 (session token not found)") + + e2, err := apistatus.ErrorFromV2(&status.Status{Code: 4096, Message: "any message"}) + require.NoError(t, err) + require.ErrorAs(t, e2, &e) + require.EqualError(t, e, "status: code = 4096 (session token not found) message = any message") +} + +func TestSessionTokenNotFound_Is(t *testing.T) { + assertErrorIs(t, apistatus.ErrSessionTokenNotFound) +} + +func TestSessionTokenNotFound_As(t *testing.T) { + var src, dst apistatus.SessionTokenNotFound + require.ErrorAs(t, src, &dst) 
+ require.Equal(t, src.Error(), dst.Error()) + require.Equal(t, src.ErrorToV2(), dst.ErrorToV2()) + + e, err := apistatus.ErrorFromV2(&status.Status{Code: 4096, Message: "any message"}) + require.NoError(t, err) + require.ErrorAs(t, e, &src) + require.ErrorAs(t, src, &dst) + require.Equal(t, src.Error(), dst.Error()) + require.Equal(t, src.ErrorToV2(), dst.ErrorToV2()) +} + +func TestSessionTokenNotFound_ErrorToV2(t *testing.T) { + var e apistatus.SessionTokenNotFound + st := e.ErrorToV2() + require.EqualValues(t, 4096, st.Code) + require.Zero(t, st.Message) + require.Zero(t, st.Details) + + e2, err := apistatus.ErrorFromV2(&status.Status{Code: 4096, Message: "any message"}) + require.NoError(t, err) + require.ErrorAs(t, e2, &e) + st = e.ErrorToV2() + require.EqualValues(t, 4096, st.Code) + require.Equal(t, "any message", st.Message) + require.Zero(t, st.Details) +} + +func TestSessionTokenExpired_Error(t *testing.T) { + var e apistatus.SessionTokenExpired + require.EqualError(t, e, "status: code = 4097 (session token has expired)") + + e2, err := apistatus.ErrorFromV2(&status.Status{Code: 4097, Message: "any message"}) + require.NoError(t, err) + require.ErrorAs(t, e2, &e) + require.EqualError(t, e, "status: code = 4097 (session token has expired) message = any message") +} + +func TestSessionTokenExpired_Is(t *testing.T) { + assertErrorIs(t, apistatus.ErrSessionTokenExpired) +} + +func TestSessionTokenExpired_As(t *testing.T) { + var src, dst apistatus.SessionTokenExpired + require.ErrorAs(t, src, &dst) + require.Equal(t, src.Error(), dst.Error()) + require.Equal(t, src.ErrorToV2(), dst.ErrorToV2()) + + e, err := apistatus.ErrorFromV2(&status.Status{Code: 4097, Message: "any message"}) + require.NoError(t, err) + require.ErrorAs(t, e, &src) + require.ErrorAs(t, src, &dst) + require.Equal(t, src.Error(), dst.Error()) + require.Equal(t, src.ErrorToV2(), dst.ErrorToV2()) +} + +func TestSessionTokenExpired_ErrorToV2(t *testing.T) { + var e 
apistatus.SessionTokenExpired + st := e.ErrorToV2() + require.EqualValues(t, 4097, st.Code) + require.Zero(t, st.Message) + require.Zero(t, st.Details) + + e2, err := apistatus.ErrorFromV2(&status.Status{Code: 4097, Message: "any message"}) + require.NoError(t, err) + require.ErrorAs(t, e2, &e) + st = e.ErrorToV2() + require.EqualValues(t, 4097, st.Code) + require.Equal(t, "any message", st.Message) + require.Zero(t, st.Details) +} diff --git a/client/status/unrecognized.go b/client/status/unrecognized.go index 15a8e1e87..bcd7e9419 100644 --- a/client/status/unrecognized.go +++ b/client/status/unrecognized.go @@ -1,34 +1,36 @@ package apistatus import ( - "github.com/nspcc-dev/neofs-api-go/v2/status" -) + "fmt" -// ErrUnrecognizedStatusV2 is an instance of UnrecognizedStatusV2 error status. It's expected to be used for [errors.Is] -// and MUST NOT be changed. -var ErrUnrecognizedStatusV2 UnrecognizedStatusV2 + "github.com/nspcc-dev/neofs-sdk-go/api/status" +) -// UnrecognizedStatusV2 describes status of the uncertain failure. -// Instances provide [StatusV2] and error interfaces. -type UnrecognizedStatusV2 struct { - v2 status.Status +// unrecognizedStatus is used for unknown statuses which cannot be interpreted +// by this package. +type unrecognizedStatusV2 struct { + code uint32 + msg string + details []*status.Status_Detail } -func (x UnrecognizedStatusV2) Error() string { - return errMessageStatusV2("unrecognized", x.v2.Message()) +func (x unrecognizedStatusV2) Error() string { + const desc = "unknown" + if x.msg != "" { + return fmt.Sprintf(errFmt, x.code, fmt.Sprintf("%s, details count = %d", desc, len(x.details)), x.msg) + } + return fmt.Sprintf(errFmtNoMessage, x.code, fmt.Sprintf("%s, details count = %d", desc, len(x.details))) } -// Is implements interface for correct checking current error type with [errors.Is]. 
-func (x UnrecognizedStatusV2) Is(target error) bool { - switch target.(type) { - default: - return false - case UnrecognizedStatusV2, *UnrecognizedStatusV2: - return true +func (x unrecognizedStatusV2) ErrorToV2() *status.Status { + return &status.Status{ + Code: x.code, + Message: x.msg, + Details: x.details, } } -// implements local interface defined in [ErrorFromV2] func. -func (x *UnrecognizedStatusV2) fromStatusV2(st *status.Status) { - x.v2 = *st -} +// Is checks whether target is of type unrecognizedStatusV2, +// *unrecognizedStatusV2 or [Error]. Is implements interface consumed by +// [errors.Is]. +func (x unrecognizedStatusV2) Is(target error) bool { return errorIs(x, target) } diff --git a/client/status/v2.go b/client/status/v2.go index 8974ea216..d0948ff6b 100644 --- a/client/status/v2.go +++ b/client/status/v2.go @@ -4,17 +4,31 @@ import ( "errors" "fmt" - "github.com/nspcc-dev/neofs-api-go/v2/container" - "github.com/nspcc-dev/neofs-api-go/v2/object" - "github.com/nspcc-dev/neofs-api-go/v2/session" - "github.com/nspcc-dev/neofs-api-go/v2/status" + "github.com/nspcc-dev/neofs-sdk-go/api/status" ) -// StatusV2 defines a variety of status instances compatible with NeoFS API V2 protocol. +const ( + errFmtNoMessage = "status: code = %v (%s)" + errFmt = errFmtNoMessage + " message = %s" +) + +// errorIs checks whether target is of type T, *T or [Error]. +func errorIs[T error, PTR *T](_ T, target error) bool { + switch target.(type) { + default: + return errors.Is(Error, target) + case T, PTR: + return true + } +} + +// StatusV2 defines a variety of status instances compatible with NeoFS API V2 +// protocol. // // Note: it is not recommended to use this type directly, it is intended for documentation of the library functionality. type StatusV2 interface { - // ErrorToV2 returns the status as github.com/nspcc-dev/neofs-api-go/v2/status.Status message structure. + error + // ErrorToV2 returns the status as [status.Status] message structure. 
ErrorToV2() *status.Status } @@ -30,100 +44,127 @@ type StatusV2 interface { // - [status.OK]: nil (this also includes nil argument). // // Common failures: -// - [status.Internal]: *[ServerInternal]; -// - [status.SignatureVerificationFail]: *[SignatureVerification]. -// - [status.WrongMagicNumber]: *[WrongMagicNumber]. -// - [status.NodeUnderMaintenance]: *[NodeUnderMaintenance]. +// - [status.InternalServerError]: [InternalServerError]; +// - [status.SignatureVerificationFail]: [SignatureVerification]. +// - [status.WrongMagicNumber]: [WrongMagicNumber]. +// - [status.NodeUnderMaintenance]: [NodeUnderMaintenance]. // // Object failures: -// - [object.StatusLocked]: *[ObjectLocked]; -// - [object.StatusLockNonRegularObject]: *[LockNonRegularObject]. -// - [object.StatusAccessDenied]: *[ObjectAccessDenied]. -// - [object.StatusNotFound]: *[ObjectNotFound]. -// - [object.StatusAlreadyRemoved]: *[ObjectAlreadyRemoved]. -// - [object.StatusOutOfRange]: *[ObjectOutOfRange]. +// - [status.ObjectLocked]: [ObjectLocked]; +// - [status.LockIrregularObject]: [LockNonRegularObject]. +// - [status.ObjectAccessDenied]: [ObjectAccessDenied]. +// - [status.ObjectNotFound]: [ObjectNotFound]. +// - [status.ObjectAlreadyRemoved]: [ObjectAlreadyRemoved]. +// - [status.OutOfRange]: [ObjectOutOfRange]. 
// // Container failures: -// - [container.StatusNotFound]: *[ContainerNotFound]; -// - [container.StatusEACLNotFound]: *[EACLNotFound]; +// - [status.ContainerNotFound]: [ContainerNotFound]; +// - [status.EACLNotFound]: [EACLNotFound]; // // Session failures: -// - [session.StatusTokenNotFound]: *[SessionTokenNotFound]; -// - [session.StatusTokenExpired]: *[SessionTokenExpired]; -func ErrorFromV2(st *status.Status) error { - var decoder interface { - fromStatusV2(*status.Status) - Error() string - } - - switch code := st.Code(); { - case status.IsSuccess(code): - //nolint:exhaustive - switch status.LocalizeSuccess(&code); code { - case status.OK: - return nil +// - [status.SessionTokenNotFound]: [SessionTokenNotFound]; +// - [status.SessionTokenExpired]: [SessionTokenExpired]; +func ErrorFromV2(st *status.Status) (StatusV2, error) { + switch st.GetCode() { + default: + return unrecognizedStatusV2{st.Code, st.Message, st.Details}, nil + case status.OK: + return nil, nil + case status.InternalServerError: + var e InternalServerError + if err := e.readFromV2(st); err != nil { + return nil, fmt.Errorf("invalid internal server error status: %w", err) } - case status.IsCommonFail(code): - switch status.LocalizeCommonFail(&code); code { - case status.Internal: - decoder = new(ServerInternal) - case status.WrongMagicNumber: - decoder = new(WrongMagicNumber) - case status.SignatureVerificationFail: - decoder = new(SignatureVerification) - case status.NodeUnderMaintenance: - decoder = new(NodeUnderMaintenance) + return e, nil + case status.WrongNetMagic: + var e WrongNetMagic + if err := e.readFromV2(st); err != nil { + return nil, fmt.Errorf("invalid wrong network magic status: %w", err) } - case object.LocalizeFailStatus(&code): - switch code { - case object.StatusLocked: - decoder = new(ObjectLocked) - case object.StatusLockNonRegularObject: - decoder = new(LockNonRegularObject) - case object.StatusAccessDenied: - decoder = new(ObjectAccessDenied) - case 
object.StatusNotFound: - decoder = new(ObjectNotFound) - case object.StatusAlreadyRemoved: - decoder = new(ObjectAlreadyRemoved) - case object.StatusOutOfRange: - decoder = new(ObjectOutOfRange) + return e, nil + case status.SignatureVerificationFail: + var e SignatureVerificationFailure + if err := e.readFromV2(st); err != nil { + return nil, fmt.Errorf("invalid signature verification failure status: %w", err) } - case container.LocalizeFailStatus(&code): - //nolint:exhaustive - switch code { - case container.StatusNotFound: - decoder = new(ContainerNotFound) - case container.StatusEACLNotFound: - decoder = new(EACLNotFound) + return e, nil + case status.NodeUnderMaintenance: + var e NodeUnderMaintenance + if err := e.readFromV2(st); err != nil { + return nil, fmt.Errorf("invalid node maintenance status: %w", err) } - case session.LocalizeFailStatus(&code): - //nolint:exhaustive - switch code { - case session.StatusTokenNotFound: - decoder = new(SessionTokenNotFound) - case session.StatusTokenExpired: - decoder = new(SessionTokenExpired) + return e, nil + case status.ObjectAccessDenied: + var e ObjectAccessDenied + if err := e.readFromV2(st); err != nil { + return nil, fmt.Errorf("invalid object access denial status: %w", err) } + return e, nil + case status.ObjectNotFound: + var e ObjectNotFound + if err := e.readFromV2(st); err != nil { + return nil, fmt.Errorf("invalid missing object status: %w", err) + } + return e, nil + case status.ObjectLocked: + var e ObjectLocked + if err := e.readFromV2(st); err != nil { + return nil, fmt.Errorf("invalid locked object status: %w", err) + } + return e, nil + case status.LockIrregularObject: + var e LockIrregularObject + if err := e.readFromV2(st); err != nil { + return nil, fmt.Errorf("invalid locking irregular object status: %w", err) + } + return e, nil + case status.ObjectAlreadyRemoved: + var e ObjectAlreadyRemoved + if err := e.readFromV2(st); err != nil { + return nil, fmt.Errorf("invalid already removed object 
status: %w", err) + } + return e, nil + case status.OutOfRange: + var e ObjectOutOfRange + if err := e.readFromV2(st); err != nil { + return nil, fmt.Errorf("invalid out-of-range status: %w", err) + } + return e, nil + case status.ContainerNotFound: + var e ContainerNotFound + if err := e.readFromV2(st); err != nil { + return nil, fmt.Errorf("invalid missing container status: %w", err) + } + return e, nil + case status.EACLNotFound: + var e EACLNotFound + if err := e.readFromV2(st); err != nil { + return nil, fmt.Errorf("invalid missing eACL status: %w", err) + } + return e, nil + case status.SessionTokenNotFound: + var e SessionTokenNotFound + if err := e.readFromV2(st); err != nil { + return nil, fmt.Errorf("invalid missing session token status: %w", err) + } + return e, nil + case status.SessionTokenExpired: + var e SessionTokenExpired + if err := e.readFromV2(st); err != nil { + return nil, fmt.Errorf("invalid expired session token status: %w", err) + } + return e, nil } - - if decoder == nil { - decoder = new(UnrecognizedStatusV2) - } - - decoder.fromStatusV2(st) - - return decoder } // ErrorToV2 converts error to status.Status message structure. Inverse to [ErrorFromV2] operation. // -// If argument is the [StatusV2] instance, it is converted directly. -// Otherwise, successes are converted with [status.OK] code w/o details and message, -// failures - with [status.Internal] and error text message w/o details. +// If argument is the [StatusV2] instance, it is converted directly. Otherwise, +// successes are returned as nil, failures - with [status.Internal] and error +// text message w/o details. 
func ErrorToV2(err error) *status.Status { if err == nil { - return newStatusV2WithLocalCode(status.OK, status.GlobalizeSuccess) + return nil } var instance StatusV2 @@ -131,34 +172,5 @@ func ErrorToV2(err error) *status.Status { return instance.ErrorToV2() } - internalErrorStatus := newStatusV2WithLocalCode(status.Internal, status.GlobalizeCommonFail) - internalErrorStatus.SetMessage(err.Error()) - - return internalErrorStatus -} - -func errMessageStatusV2(code any, msg string) string { - const ( - noMsgFmt = "status: code = %v" - msgFmt = noMsgFmt + " message = %s" - ) - - if msg != "" { - return fmt.Sprintf(msgFmt, code, msg) - } - - return fmt.Sprintf(noMsgFmt, code) -} - -func newStatusV2WithLocalCode(code status.Code, globalizer func(*status.Code)) *status.Status { - var st status.Status - - st.SetCode(globalizeCodeV2(code, globalizer)) - - return &st -} - -func globalizeCodeV2(code status.Code, globalizer func(*status.Code)) status.Code { - globalizer(&code) - return code + return &status.Status{Code: status.InternalServerError, Message: err.Error()} } diff --git a/client/status/v2_test.go b/client/status/v2_test.go index d38beb210..9047b5a9a 100644 --- a/client/status/v2_test.go +++ b/client/status/v2_test.go @@ -4,219 +4,178 @@ import ( "errors" "testing" + "github.com/nspcc-dev/neofs-sdk-go/api/status" apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" "github.com/stretchr/testify/require" ) -func TestFromStatusV2(t *testing.T) { - type statusConstructor func() error - - for _, testItem := range [...]struct { - status any // Status or statusConstructor - codeV2 uint64 - messageV2 string - compatibleErrs []error - checkAsErr func(error) bool +func TestErrorToV2(t *testing.T) { + for _, testCase := range []struct { + e error + code uint32 + msg string + details []*status.Status_Detail }{ - { - status: (statusConstructor)(func() error { - return errors.New("some error") - }), - codeV2: 1024, - messageV2: "some error", - }, - { - status: 
(statusConstructor)(func() error { - return nil - }), - codeV2: 0, - }, - { - status: (statusConstructor)(func() error { - st := new(apistatus.ServerInternal) - st.SetMessage("internal error message") - - return st - }), - codeV2: 1024, - compatibleErrs: []error{apistatus.ErrServerInternal, apistatus.ServerInternal{}, &apistatus.ServerInternal{}, apistatus.Error}, - checkAsErr: func(err error) bool { - var target *apistatus.ServerInternal - return errors.As(err, &target) - }, - }, - { - status: (statusConstructor)(func() error { - st := new(apistatus.WrongMagicNumber) - st.WriteCorrectMagic(322) - - return st - }), - codeV2: 1025, - compatibleErrs: []error{apistatus.ErrWrongMagicNumber, apistatus.WrongMagicNumber{}, &apistatus.WrongMagicNumber{}, apistatus.Error}, - checkAsErr: func(err error) bool { - var target *apistatus.WrongMagicNumber - return errors.As(err, &target) - }, - }, - { - status: (statusConstructor)(func() error { - return new(apistatus.ObjectLocked) - }), - codeV2: 2050, - compatibleErrs: []error{apistatus.ErrObjectLocked, apistatus.ObjectLocked{}, &apistatus.ObjectLocked{}, apistatus.Error}, - checkAsErr: func(err error) bool { - var target *apistatus.ObjectLocked - return errors.As(err, &target) - }, - }, - { - status: (statusConstructor)(func() error { - return new(apistatus.LockNonRegularObject) - }), - codeV2: 2051, - compatibleErrs: []error{apistatus.ErrLockNonRegularObject, apistatus.LockNonRegularObject{}, &apistatus.LockNonRegularObject{}, apistatus.Error}, - checkAsErr: func(err error) bool { - var target *apistatus.LockNonRegularObject - return errors.As(err, &target) - }, - }, - { - status: (statusConstructor)(func() error { - st := new(apistatus.ObjectAccessDenied) - st.WriteReason("any reason") - - return st - }), - codeV2: 2048, - compatibleErrs: []error{apistatus.ErrObjectAccessDenied, apistatus.ObjectAccessDenied{}, &apistatus.ObjectAccessDenied{}, apistatus.Error}, - checkAsErr: func(err error) bool { - var target 
*apistatus.ObjectAccessDenied - return errors.As(err, &target) - }, - }, - { - status: (statusConstructor)(func() error { - return new(apistatus.ObjectNotFound) - }), - codeV2: 2049, - compatibleErrs: []error{apistatus.ErrObjectNotFound, apistatus.ObjectNotFound{}, &apistatus.ObjectNotFound{}, apistatus.Error}, - checkAsErr: func(err error) bool { - var target *apistatus.ObjectNotFound - return errors.As(err, &target) - }, - }, - { - status: (statusConstructor)(func() error { - return new(apistatus.ObjectAlreadyRemoved) - }), - codeV2: 2052, - compatibleErrs: []error{apistatus.ErrObjectAlreadyRemoved, apistatus.ObjectAlreadyRemoved{}, &apistatus.ObjectAlreadyRemoved{}, apistatus.Error}, - checkAsErr: func(err error) bool { - var target *apistatus.ObjectAlreadyRemoved - return errors.As(err, &target) - }, - }, - { - status: statusConstructor(func() error { - return new(apistatus.ObjectOutOfRange) - }), - codeV2: 2053, - compatibleErrs: []error{apistatus.ErrObjectOutOfRange, apistatus.ObjectOutOfRange{}, &apistatus.ObjectOutOfRange{}, apistatus.Error}, - checkAsErr: func(err error) bool { - var target *apistatus.ObjectOutOfRange - return errors.As(err, &target) - }, - }, - { - status: (statusConstructor)(func() error { - return new(apistatus.ContainerNotFound) - }), - codeV2: 3072, - compatibleErrs: []error{apistatus.ErrContainerNotFound, apistatus.ContainerNotFound{}, &apistatus.ContainerNotFound{}, apistatus.Error}, - checkAsErr: func(err error) bool { - var target *apistatus.ContainerNotFound - return errors.As(err, &target) - }, - }, - { - status: (statusConstructor)(func() error { - return new(apistatus.EACLNotFound) - }), - codeV2: 3073, - compatibleErrs: []error{apistatus.ErrEACLNotFound, apistatus.EACLNotFound{}, &apistatus.EACLNotFound{}, apistatus.Error}, - checkAsErr: func(err error) bool { - var target *apistatus.EACLNotFound - return errors.As(err, &target) - }, - }, - { - status: (statusConstructor)(func() error { - return 
new(apistatus.SessionTokenNotFound) - }), - codeV2: 4096, - compatibleErrs: []error{apistatus.ErrSessionTokenNotFound, apistatus.SessionTokenNotFound{}, &apistatus.SessionTokenNotFound{}, apistatus.Error}, - checkAsErr: func(err error) bool { - var target *apistatus.SessionTokenNotFound - return errors.As(err, &target) - }, - }, - { - status: (statusConstructor)(func() error { - return new(apistatus.SessionTokenExpired) - }), - codeV2: 4097, - compatibleErrs: []error{apistatus.ErrSessionTokenExpired, apistatus.SessionTokenExpired{}, &apistatus.SessionTokenExpired{}, apistatus.Error}, - checkAsErr: func(err error) bool { - var target *apistatus.SessionTokenExpired - return errors.As(err, &target) - }, - }, - { - status: (statusConstructor)(func() error { - return new(apistatus.NodeUnderMaintenance) - }), - codeV2: 1027, - compatibleErrs: []error{apistatus.ErrNodeUnderMaintenance, apistatus.NodeUnderMaintenance{}, &apistatus.NodeUnderMaintenance{}, apistatus.Error}, - checkAsErr: func(err error) bool { - var target *apistatus.NodeUnderMaintenance - return errors.As(err, &target) - }, - }, + {nil, 0, "", nil}, + {errors.New("some error"), 1024, "some error", nil}, + {apistatus.ErrServerInternal, 1024, "", nil}, + {apistatus.NewInternalServerError(errors.New("some reason")), 1024, "some reason", nil}, + {apistatus.ErrWrongNetMagic, 1025, "", nil}, + {apistatus.NewWrongNetMagicError(6091320843), 1025, "", + []*status.Status_Detail{{Id: 0, Value: []byte{0, 0, 0, 1, 107, 18, 46, 11}}}}, + {apistatus.ErrSignatureVerification, 1026, "", nil}, + {apistatus.NewSignatureVerificationFailure(errors.New("some reason")), 1026, "some reason", nil}, + {apistatus.ErrNodeUnderMaintenance, 1027, "", nil}, + {apistatus.ErrContainerNotFound, 3072, "", nil}, + {apistatus.NewContainerNotFoundError(errors.New("some reason")), 3072, "some reason", nil}, + {apistatus.ErrEACLNotFound, 3073, "", nil}, + {apistatus.NewEACLNotFoundError(errors.New("some reason")), 3073, "some reason", nil}, + 
{apistatus.ErrObjectLocked, 2050, "", nil}, + {apistatus.ErrLockIrregularObject, 2051, "", nil}, + {apistatus.ErrObjectAccessDenied, 2048, "", nil}, + {apistatus.NewObjectAccessDeniedError("some reason"), 2048, "", + []*status.Status_Detail{{Id: 0, Value: []byte("some reason")}}}, + {apistatus.ErrObjectNotFound, 2049, "", nil}, + {apistatus.NewObjectNotFoundError(errors.New("some reason")), 2049, "some reason", nil}, + {apistatus.ErrObjectAlreadyRemoved, 2052, "", nil}, + {apistatus.ErrObjectOutOfRange, 2053, "", nil}, + {apistatus.ErrSessionTokenNotFound, 4096, "", nil}, + {apistatus.ErrSessionTokenExpired, 4097, "", nil}, } { - var st error - cons, ok := testItem.status.(statusConstructor) - require.True(t, ok) - - st = cons() - - stv2 := apistatus.ErrorToV2(st) - - // must generate the same status.Status message - require.EqualValues(t, testItem.codeV2, stv2.Code()) - if len(testItem.messageV2) > 0 { - require.Equal(t, testItem.messageV2, stv2.Message()) - } - - _, ok = st.(apistatus.StatusV2) - if ok { - // restore and convert again - restored := apistatus.ErrorFromV2(stv2) - - res := apistatus.ErrorToV2(restored) + st := apistatus.ErrorToV2(testCase.e) + require.Equal(t, testCase.code, st.GetCode(), testCase) + require.Equal(t, testCase.msg, st.GetMessage(), testCase) + require.Equal(t, testCase.details, st.GetDetails(), testCase) + } +} - // must generate the same status.Status message - require.Equal(t, stv2, res) - } +func TestErrorFromV2(t *testing.T) { + t.Run("valid", func(t *testing.T) { + t.Run("unrecognized", func(t *testing.T) { + st := &status.Status{ + Code: 92035, + Details: []*status.Status_Detail{ + {Id: 45982, Value: []byte("detail_1")}, + {Id: 3464363, Value: []byte("detail_2")}, + }, + } + e, err := apistatus.ErrorFromV2(st) + require.NoError(t, err) + require.ErrorIs(t, e, apistatus.Error) + require.Equal(t, st, e.ErrorToV2()) + require.EqualError(t, e, "status: code = 92035 (unknown, details count = 2)") - randomError := 
errors.New("garbage") - for _, err := range testItem.compatibleErrs { - require.ErrorIs(t, st, err) - require.NotErrorIs(t, randomError, err) + st.Message = "any message" + e, err = apistatus.ErrorFromV2(st) + require.NoError(t, err) + require.Equal(t, st, e.ErrorToV2()) + require.EqualError(t, e, "status: code = 92035 (unknown, details count = 2) message = any message") + }) + for _, testCase := range []struct { + st *status.Status + err error + }{ + {st: nil, err: nil}, + {st: new(status.Status), err: nil}, + {st: &status.Status{Code: 1024}, err: apistatus.ErrServerInternal}, + {st: &status.Status{Code: 1024, Message: "some reason"}, + err: apistatus.NewInternalServerError(errors.New("some reason"))}, + {st: &status.Status{Code: 1025}, err: apistatus.ErrWrongNetMagic}, + {st: &status.Status{Code: 1025, Details: []*status.Status_Detail{{Id: 0, Value: []byte{0, 0, 0, 1, 107, 18, 46, 11}}}}, + err: apistatus.NewWrongNetMagicError(6091320843)}, + {st: &status.Status{Code: 1026}, err: apistatus.ErrSignatureVerification}, + {st: &status.Status{Code: 1026, Message: "some reason"}, + err: apistatus.NewSignatureVerificationFailure(errors.New("some reason"))}, + {st: &status.Status{Code: 1027}, err: apistatus.ErrNodeUnderMaintenance}, + {st: &status.Status{Code: 3072}, err: apistatus.ErrContainerNotFound}, + {st: &status.Status{Code: 3072, Message: "some reason"}, + err: apistatus.NewContainerNotFoundError(errors.New("some reason"))}, + {st: &status.Status{Code: 3073}, err: apistatus.ErrEACLNotFound}, + {st: &status.Status{Code: 3073, Message: "some reason"}, + err: apistatus.NewEACLNotFoundError(errors.New("some reason"))}, + {st: &status.Status{Code: 2050}, err: apistatus.ErrObjectLocked}, + {st: &status.Status{Code: 2051}, err: apistatus.ErrLockIrregularObject}, + {st: &status.Status{Code: 2048}, err: apistatus.ErrObjectAccessDenied}, + {st: &status.Status{Code: 2048, Details: []*status.Status_Detail{{Id: 0, Value: []byte("some reason")}}}, + err: 
apistatus.NewObjectAccessDeniedError("some reason")}, + {st: &status.Status{Code: 2049}, err: apistatus.ErrObjectNotFound}, + {st: &status.Status{Code: 2049, Message: "some reason"}, + err: apistatus.NewObjectNotFoundError(errors.New("some reason"))}, + {st: &status.Status{Code: 2052}, err: apistatus.ErrObjectAlreadyRemoved}, + {st: &status.Status{Code: 2053}, err: apistatus.ErrObjectOutOfRange}, + {st: &status.Status{Code: 4096}, err: apistatus.ErrSessionTokenNotFound}, + {st: &status.Status{Code: 4097}, err: apistatus.ErrSessionTokenExpired}, + } { + e, err := apistatus.ErrorFromV2(testCase.st) + require.NoError(t, err) + require.Equal(t, testCase.err, e, testCase) } - - if testItem.checkAsErr != nil { - require.True(t, testItem.checkAsErr(st)) + }) + t.Run("invalid", func(t *testing.T) { + for _, testCase := range []struct { + e string + st *status.Status + }{ + {e: "invalid internal server error status: details attached but not supported", + st: &status.Status{Code: 1024, Details: make([]*status.Status_Detail, 1)}, + }, + {e: "invalid wrong network magic status: too many details (2)", + st: &status.Status{Code: 1025, Details: make([]*status.Status_Detail, 2)}, + }, + {e: "invalid wrong network magic status: unsupported detail ID=42", + st: &status.Status{Code: 1025, Details: []*status.Status_Detail{{Id: 42}}}, + }, + {e: "invalid wrong network magic status: invalid correct value detail: invalid length 7", + st: &status.Status{Code: 1025, Details: []*status.Status_Detail{{Id: 0, Value: make([]byte, 7)}}}, + }, + {e: "invalid wrong network magic status: invalid correct value detail: invalid length 9", + st: &status.Status{Code: 1025, Details: []*status.Status_Detail{{Id: 0, Value: make([]byte, 9)}}}, + }, + {e: "invalid signature verification failure status: details attached but not supported", + st: &status.Status{Code: 1026, Details: make([]*status.Status_Detail, 1)}, + }, + {e: "invalid node maintenance status: details attached but not supported", + st: 
&status.Status{Code: 1027, Details: make([]*status.Status_Detail, 1)}, + }, + {e: "invalid missing container status: details attached but not supported", + st: &status.Status{Code: 3072, Details: make([]*status.Status_Detail, 1)}, + }, + {e: "invalid missing eACL status: details attached but not supported", + st: &status.Status{Code: 3073, Details: make([]*status.Status_Detail, 1)}, + }, + {e: "invalid locked object status: details attached but not supported", + st: &status.Status{Code: 2050, Details: make([]*status.Status_Detail, 1)}, + }, + {e: "invalid locking irregular object status: details attached but not supported", + st: &status.Status{Code: 2051, Details: make([]*status.Status_Detail, 1)}, + }, + {e: "invalid object access denial status: too many details (2)", + st: &status.Status{Code: 2048, Details: make([]*status.Status_Detail, 2)}, + }, + {e: "invalid object access denial status: unsupported detail ID=42", + st: &status.Status{Code: 2048, Details: []*status.Status_Detail{{Id: 42}}}, + }, + {e: "invalid object access denial status: invalid reason detail: invalid UTF-8 string", + st: &status.Status{Code: 2048, Details: []*status.Status_Detail{{Id: 0, Value: []byte{66, 250, 67}}}}, + }, + {e: "invalid missing object status: details attached but not supported", + st: &status.Status{Code: 2049, Details: make([]*status.Status_Detail, 1)}, + }, + {e: "invalid already removed object status: details attached but not supported", + st: &status.Status{Code: 2052, Details: make([]*status.Status_Detail, 1)}, + }, + {e: "invalid out-of-range status: details attached but not supported", + st: &status.Status{Code: 2053, Details: make([]*status.Status_Detail, 1)}, + }, + {e: "invalid missing session token status: details attached but not supported", + st: &status.Status{Code: 4096, Details: make([]*status.Status_Detail, 1)}, + }, + {e: "invalid expired session token status: details attached but not supported", + st: &status.Status{Code: 4097, Details: 
make([]*status.Status_Detail, 1)}, + }, + } { + _, err := apistatus.ErrorFromV2(testCase.st) + require.EqualError(t, err, testCase.e, testCase) } - } + }) } diff --git a/container/acl/acl.go b/container/acl/acl.go index 1df713f7a..130a15255 100644 --- a/container/acl/acl.go +++ b/container/acl/acl.go @@ -23,7 +23,7 @@ const ( opLast // extreme value for testing ) -// String implements fmt.Stringer. +// String implements [fmt.Stringer]. func (x Op) String() string { switch x { default: @@ -64,7 +64,7 @@ const ( roleLast // extreme value for testing ) -// String implements fmt.Stringer. +// String implements [fmt.Stringer]. func (x Role) String() string { switch x { default: diff --git a/container/acl/acl_basic.go b/container/acl/acl_basic.go index e1ee3ef83..affa475a7 100644 --- a/container/acl/acl_basic.go +++ b/container/acl/acl_basic.go @@ -19,6 +19,8 @@ import ( // However, these similarities should only be used for better understanding, // in general these mechanisms are different. // +// Basic implements built-in comparable interface. +// // Instances can be created using built-in var declaration, but look carefully // at the default values, and how individual permissions are regulated. // Some frequently used values are presented in exported variables. @@ -32,14 +34,14 @@ type Basic uint32 // FromBits decodes Basic from the numerical representation. // -// See also Bits. +// See also [Basic.Bits]. func (x *Basic) FromBits(bits uint32) { *x = Basic(bits) } // Bits returns numerical encoding of Basic. // -// See also FromBits. +// See also [Basic.FromBits]. func (x Basic) Bits() uint32 { return uint32(x) } @@ -64,12 +66,12 @@ const ( // DisableExtension makes Basic FINAL. FINAL indicates the ACL non-extendability // in the related container. // -// See also Extendable. +// See also [Basic.Extendable]. func (x *Basic) DisableExtension() { setBit((*uint32)(x), bitPosFinal) } -// Extendable checks if Basic is NOT made FINAL using DisableExtension. 
+// Extendable checks if Basic is NOT made FINAL using [Basic.DisableExtension]. // // Zero Basic is extendable. func (x Basic) Extendable() bool { @@ -79,12 +81,12 @@ func (x Basic) Extendable() bool { // MakeSticky makes Basic STICKY. STICKY indicates that only the owner of any // particular object is allowed to operate on it. // -// See also Sticky. +// See also [Basic.Sticky]. func (x *Basic) MakeSticky() { setBit((*uint32)(x), bitPosSticky) } -// Sticky checks if Basic is made STICKY using MakeSticky. +// Sticky checks if Basic is made STICKY using [Basic.MakeSticky]. // // Zero Basic is NOT STICKY. func (x Basic) Sticky() bool { @@ -121,7 +123,7 @@ func isReplicationOp(op Op) bool { // OpObjectSearch // OpObjectHash // -// See also IsOpAllowed. +// See also [Basic.IsOpAllowed]. func (x *Basic) AllowOp(op Op, role Role) { var bitPos uint8 @@ -168,7 +170,7 @@ func (x *Basic) AllowOp(op Op, role Role) { // Zero Basic prevents any role from accessing any operation in the absence // of default rights. // -// See also AllowOp. +// See also [Basic.AllowOp]. func (x Basic) IsOpAllowed(op Op, role Role) bool { var bitPos uint8 @@ -204,12 +206,12 @@ func (x Basic) IsOpAllowed(op Op, role Role) bool { // AllowBearerRules allows bearer to provide extended ACL rules for the given // operation. Bearer rules doesn't depend on container ACL extensibility. // -// See also AllowedBearerRules. +// See also [Basic.AllowedBearerRules]. func (x *Basic) AllowBearerRules(op Op) { setOpBit((*uint32)(x), op, opBitPosBearer) } -// AllowedBearerRules checks if bearer rules are allowed using AllowBearerRules. +// AllowedBearerRules checks if bearer rules are allowed using [Basic.AllowBearerRules]. // Op MUST be one of the Op enumeration. // // Zero Basic disallows bearer rules for any op. @@ -224,7 +226,7 @@ func (x Basic) EncodeToString() string { return strconv.FormatUint(uint64(x), 16) } -// Names of the frequently used Basic values. +// Names of the frequently used [Basic] values. 
const ( NamePrivate = "private" NamePrivateExtended = "eacl-private" @@ -236,7 +238,7 @@ const ( NamePublicAppendExtended = "eacl-public-append" ) -// Frequently used Basic values. Bitmasks are taken from the NeoFS Specification. +// Frequently used [Basic] values. Bitmasks are taken from the NeoFS Specification. const ( Private = Basic(0x1C8C8CCC) // private PrivateExtended = Basic(0x0C8C8CCC) // eacl-private @@ -248,8 +250,8 @@ const ( PublicAppendExtended = Basic(0x0FBF9FFF) // eacl-public-append ) -// DecodeString decodes string calculated using EncodeToString. Also supports -// human-readable names (Name* constants). +// DecodeString decodes string calculated using [Basic.EncodeToString]. Also +// supports human-readable names (Name* constants). func (x *Basic) DecodeString(s string) (e error) { switch s { case NamePrivate: diff --git a/container/acl/doc.go b/container/acl/doc.go index 3bf14d3ce..5e22da931 100644 --- a/container/acl/doc.go +++ b/container/acl/doc.go @@ -1,8 +1,8 @@ /* Package acl provides functionality to control access to data and operations on them in NeoFS containers. -Type Basic represents basic ACL of the NeoFS container which specifies the general order of data access. -Basic provides interface of rule composition. Package acl also exports some frequently used settings like +Type [Basic] represents basic ACL of the NeoFS container which specifies the general order of data access. +[Basic] provides interface of rule composition. Package acl also exports some frequently used settings like private or public. 
*/ package acl diff --git a/container/container.go b/container/container.go index 6b13e9972..25d1ad781 100644 --- a/container/container.go +++ b/container/container.go @@ -1,7 +1,7 @@ package container import ( - "bytes" + "crypto/sha256" "errors" "fmt" "strconv" @@ -9,15 +9,16 @@ import ( "time" "github.com/google/uuid" - "github.com/nspcc-dev/neofs-api-go/v2/container" - v2netmap "github.com/nspcc-dev/neofs-api-go/v2/netmap" - "github.com/nspcc-dev/neofs-api-go/v2/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/container" + apinetmap "github.com/nspcc-dev/neofs-sdk-go/api/netmap" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" "github.com/nspcc-dev/neofs-sdk-go/container/acl" cid "github.com/nspcc-dev/neofs-sdk-go/container/id" - neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" "github.com/nspcc-dev/neofs-sdk-go/netmap" "github.com/nspcc-dev/neofs-sdk-go/user" "github.com/nspcc-dev/neofs-sdk-go/version" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" ) // Container represents descriptor of the NeoFS container. Container logically @@ -28,77 +29,92 @@ import ( // // Container type instances can represent different container states in the // system, depending on the context. To create new container in NeoFS zero -// instance SHOULD be declared, initialized using Init method and filled using -// dedicated methods. Once container is saved in the NeoFS network, it can't be -// changed: containers stored in the system are immutable, and NeoFS is a CAS -// of containers that are identified by a fixed length value (see cid.ID type). +// instance should be initialized using [New] and finalized using dedicated +// methods. Once container is saved in the NeoFS network, it can't be changed: +// containers stored in the system are immutable, and NeoFS is a CAS of +// containers that are identified by a fixed length value (see [cid.ID] type). 
// Instances for existing containers can be initialized using decoding methods -// (e.g Unmarshal). +// (e.g [Container.Unmarshal]). // -// Container is mutually compatible with github.com/nspcc-dev/neofs-api-go/v2/container.Container -// message. See ReadFromV2 / WriteToV2 methods. +// Container is mutually compatible with [container.Container] message. See +// [Container.ReadFromV2] / [Container.WriteToV2] methods. type Container struct { - v2 container.Container + versionSet bool + version version.Version + + nonceSet bool + nonce uuid.UUID + + ownerSet bool + owner user.ID + + basicACL acl.Basic + + policySet bool + policy netmap.PlacementPolicy + + attrs []*container.Container_Attribute } +// Various well-known container attributes widely used by applications. const ( attributeName = "Name" attributeTimestamp = "Timestamp" ) -// CopyTo writes deep copy of the [Container] to dst. -func (x Container) CopyTo(dst *Container) { - dst.SetBasicACL(x.BasicACL()) - - if owner := x.v2.GetOwnerID(); owner != nil { - var newOwner refs.OwnerID - newOwner.SetValue(bytes.Clone(owner.GetValue())) - - dst.v2.SetOwnerID(&newOwner) - } else { - dst.v2.SetOwnerID(nil) - } +// System container attributes. +const ( + sysAttributePrefix = "__NEOFS__" + sysAttributeDomainName = sysAttributePrefix + "NAME" + sysAttributeDomainZone = sysAttributePrefix + "ZONE" + sysAttributeDisableHomoHash = sysAttributePrefix + "DISABLE_HOMOMORPHIC_HASHING" +) - if x.v2.GetVersion() != nil { - ver := x.v2.GetVersion() - newVer := *ver - dst.v2.SetVersion(&newVer) - } else { - dst.v2.SetVersion(nil) +// New constructs new Container instance. +func New(owner user.ID, basicACL acl.Basic, policy netmap.PlacementPolicy) Container { + return Container{ + versionSet: true, + version: version.Current, + nonceSet: true, + nonce: uuid.New(), + ownerSet: true, + owner: owner, + basicACL: basicACL, + policySet: true, + policy: policy, } +} - // do we need to set the different nonce? 
- dst.v2.SetNonce(bytes.Clone(x.v2.GetNonce())) - - if len(x.v2.GetAttributes()) > 0 { - dst.v2.SetAttributes([]container.Attribute{}) - - attributeIterator := func(key, val string) { - dst.SetAttribute(key, val) +// CopyTo writes deep copy of the [Container] to dst. +func (x Container) CopyTo(dst *Container) { + dst.versionSet = x.versionSet + dst.version = x.version + dst.nonceSet = x.nonceSet + dst.nonce = x.nonce + dst.ownerSet = x.ownerSet + dst.owner = x.owner + dst.SetBasicACL(x.BasicACL()) + dst.policySet = x.policySet + x.policy.CopyTo(&dst.policy) + + if x.attrs != nil { + dst.attrs = make([]*container.Container_Attribute, len(x.attrs)) + for i := range x.attrs { + if x.attrs[i] != nil { + dst.attrs[i] = &container.Container_Attribute{Key: x.attrs[i].Key, Value: x.attrs[i].Value} + } else { + dst.attrs[i] = nil + } } - - x.IterateAttributes(attributeIterator) - } - - if x.v2.GetPlacementPolicy() != nil { - var ppCopy netmap.PlacementPolicy - x.PlacementPolicy().CopyTo(&ppCopy) - dst.SetPlacementPolicy(ppCopy) } else { - x.v2.SetPlacementPolicy(nil) + dst.attrs = nil } } -// reads Container from the container.Container message. If checkFieldPresence is set, -// returns an error on absence of any protocol-required field. 
-func (x *Container) readFromV2(m container.Container, checkFieldPresence bool) error { +func (x *Container) readFromV2(m *container.Container, checkFieldPresence bool) error { var err error - - ownerV2 := m.GetOwnerID() - if ownerV2 != nil { - var owner user.ID - - err = owner.ReadFromV2(*ownerV2) + if x.ownerSet = m.OwnerId != nil; x.ownerSet { + err = x.owner.ReadFromV2(m.OwnerId) if err != nil { return fmt.Errorf("invalid owner: %w", err) } @@ -106,30 +122,28 @@ func (x *Container) readFromV2(m container.Container, checkFieldPresence bool) e return errors.New("missing owner") } - binNonce := m.GetNonce() - if len(binNonce) > 0 { - var nonce uuid.UUID - - err = nonce.UnmarshalBinary(binNonce) + if x.nonceSet = len(m.Nonce) > 0; x.nonceSet { + err = x.nonce.UnmarshalBinary(m.Nonce) if err != nil { return fmt.Errorf("invalid nonce: %w", err) - } else if ver := nonce.Version(); ver != 4 { - return fmt.Errorf("invalid nonce UUID version %d", ver) + } else if ver := x.nonce.Version(); ver != 4 { + return fmt.Errorf("invalid nonce: wrong UUID version %d", ver) } } else if checkFieldPresence { return errors.New("missing nonce") } - ver := m.GetVersion() - if checkFieldPresence && ver == nil { + if x.versionSet = m.Version != nil; x.versionSet { + err = x.version.ReadFromV2(m.Version) + if err != nil { + return fmt.Errorf("invalid version: %w", err) + } + } else if checkFieldPresence { return errors.New("missing version") } - policyV2 := m.GetPlacementPolicy() - if policyV2 != nil { - var policy netmap.PlacementPolicy - - err = policy.ReadFromV2(*policyV2) + if x.policySet = m.PlacementPolicy != nil; x.policySet { + err = x.policy.ReadFromV2(m.PlacementPolicy) if err != nil { return fmt.Errorf("invalid placement policy: %w", err) } @@ -138,194 +152,203 @@ func (x *Container) readFromV2(m container.Container, checkFieldPresence bool) e } attrs := m.GetAttributes() - mAttr := make(map[string]struct{}, len(attrs)) - var key, val string - var was bool - + var key string for 
i := range attrs { key = attrs[i].GetKey() if key == "" { - return errors.New("empty attribute key") + return fmt.Errorf("invalid attribute #%d: missing key", i) + } // also prevents further NPE + for j := 0; j < i; j++ { + if attrs[j].Key == key { + return fmt.Errorf("multiple attributes with key=%s", key) + } } - - _, was = mAttr[key] - if was { - return fmt.Errorf("duplicated attribute %s", key) + if attrs[i].Value == "" { + return fmt.Errorf("invalid attribute #%d (%s): missing value", i, key) } - - val = attrs[i].GetValue() - if val == "" { - return fmt.Errorf("empty attribute value %s", key) - } - switch key { case attributeTimestamp: - _, err = strconv.ParseInt(val, 10, 64) + _, err = strconv.ParseInt(attrs[i].Value, 10, 64) + if err != nil { + return fmt.Errorf("invalid timestamp attribute (#%d): invalid integer (%w)", i, err) + } } - - if err != nil { - return fmt.Errorf("invalid attribute value %s: %s (%w)", key, val, err) - } - - mAttr[key] = struct{}{} } - x.v2 = m + x.basicACL.FromBits(m.BasicAcl) + x.attrs = attrs return nil } -// ReadFromV2 reads Container from the container.Container message. Checks if the -// message conforms to NeoFS API V2 protocol. +// ReadFromV2 reads Container from the [container.Container] message. Returns an +// error if the message is malformed according to the NeoFS API V2 protocol. The +// message must not be nil. // -// See also WriteToV2. -func (x *Container) ReadFromV2(m container.Container) error { +// ReadFromV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. +// +// See also [Container.WriteToV2]. +func (x *Container) ReadFromV2(m *container.Container) error { return x.readFromV2(m, true) } -// WriteToV2 writes Container into the container.Container message. -// The message MUST NOT be nil. +// WriteToV2 writes Container to the [container.Container] message of the NeoFS +// API protocol. 
+// +// WriteToV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. // -// See also ReadFromV2. +// See also [Container.ReadFromV2]. func (x Container) WriteToV2(m *container.Container) { - *m = x.v2 + if x.versionSet { + m.Version = new(refs.Version) + x.version.WriteToV2(m.Version) + } else { + m.Version = nil + } + if x.ownerSet { + m.OwnerId = new(refs.OwnerID) + x.owner.WriteToV2(m.OwnerId) + } else { + m.OwnerId = nil + } + if x.nonceSet { + m.Nonce = x.nonce[:] + } else { + m.Nonce = nil + } + if x.policySet { + m.PlacementPolicy = new(apinetmap.PlacementPolicy) + x.policy.WriteToV2(m.PlacementPolicy) + } else { + m.PlacementPolicy = nil + } + m.BasicAcl = x.basicACL.Bits() + m.Attributes = x.attrs } // Marshal encodes Container into a binary format of the NeoFS API protocol -// (Protocol Buffers with direct field order). +// (Protocol Buffers V3 with direct field order). // -// See also Unmarshal. +// See also [Container.Unmarshal]. func (x Container) Marshal() []byte { - return x.v2.StableMarshal(nil) -} - -// SignedData returns actual payload to sign. -// -// See also [Container.CalculateSignature]. -func (x Container) SignedData() []byte { - return x.Marshal() + var m container.Container + x.WriteToV2(&m) + b := make([]byte, m.MarshaledSize()) + m.MarshalStable(b) + return b } -// Unmarshal decodes NeoFS API protocol binary format into the Container -// (Protocol Buffers with direct field order). Returns an error describing -// a format violation. +// Unmarshal decodes Protocol Buffers V3 binary data into the Container. Returns +// an error describing a format violation of the specified fields. Unmarshal +// does not check presence of the required fields and, at the same time, checks +// format of presented fields. // -// See also Marshal. +// See also [Container.Marshal]. 
func (x *Container) Unmarshal(data []byte) error { var m container.Container - - err := m.Unmarshal(data) + err := proto.Unmarshal(data, &m) if err != nil { - return err + return fmt.Errorf("decode protobuf: %w", err) } - - return x.readFromV2(m, false) + return x.readFromV2(&m, false) } // MarshalJSON encodes Container into a JSON format of the NeoFS API protocol -// (Protocol Buffers JSON). +// (Protocol Buffers V3 JSON). // -// See also UnmarshalJSON. +// See also [Container.UnmarshalJSON]. func (x Container) MarshalJSON() ([]byte, error) { - return x.v2.MarshalJSON() + var m container.Container + x.WriteToV2(&m) + return protojson.Marshal(&m) } -// UnmarshalJSON decodes NeoFS API protocol JSON format into the Container -// (Protocol Buffers JSON). Returns an error describing a format violation. +// UnmarshalJSON decodes NeoFS API protocol JSON data into the Container +// (Protocol Buffers V3 JSON). Returns an error describing a format violation. +// UnmarshalJSON does not check presence of the required fields and, at the same +// time, checks format of presented fields. // -// See also MarshalJSON. +// See also [Container.MarshalJSON]. func (x *Container) UnmarshalJSON(data []byte) error { - return x.v2.UnmarshalJSON(data) -} - -// Init initializes all internal data of the Container required by NeoFS API -// protocol. Init MUST be called when creating a new container. Init SHOULD NOT -// be called multiple times. Init SHOULD NOT be called if the Container instance -// is used for decoding only. 
-func (x *Container) Init() { - var ver refs.Version - version.Current().WriteToV2(&ver) - - x.v2.SetVersion(&ver) - - nonce, err := uuid.New().MarshalBinary() + var m container.Container + err := protojson.Unmarshal(data, &m) if err != nil { - panic(fmt.Sprintf("unexpected error from UUID.MarshalBinary: %v", err)) + return fmt.Errorf("decode protojson: %w", err) } - x.v2.SetNonce(nonce) + return x.readFromV2(&m, false) } -// SetOwner specifies the owner of the Container. Each Container has exactly -// one owner, so SetOwner MUST be called for instances to be saved in the -// NeoFS. +// SetOwner specifies the owner of the Container. Each Container has exactly one +// owner. // -// See also Owner. +// See also [Container.Owner]. func (x *Container) SetOwner(owner user.ID) { - var m refs.OwnerID - owner.WriteToV2(&m) - - x.v2.SetOwnerID(&m) + x.owner, x.ownerSet = owner, true } -// Owner returns owner of the Container set using SetOwner. +// Owner returns owner of the Container. // // Zero Container has no owner which is incorrect according to NeoFS API // protocol. -func (x Container) Owner() (res user.ID) { - m := x.v2.GetOwnerID() - if m != nil { - err := res.ReadFromV2(*m) - if err != nil { - panic(fmt.Sprintf("unexpected error from user.ID.ReadFromV2: %v", err)) - } +// +// See also [Container.SetOwner]. +func (x Container) Owner() user.ID { + if x.ownerSet { + return x.owner } - - return + return user.ID{} } // SetBasicACL specifies basic part of the Container ACL. Basic ACL is used // to control access inside container storage. // -// See also BasicACL. +// See also [Container.BasicACL]. func (x *Container) SetBasicACL(basicACL acl.Basic) { - x.v2.SetBasicACL(basicACL.Bits()) + x.basicACL = basicACL } -// BasicACL returns basic ACL set using SetBasicACL. +// BasicACL returns basic ACL of the Container. // // Zero Container has zero basic ACL which structurally correct but doesn't // make sense since it denies any access to any party. 
-func (x Container) BasicACL() (res acl.Basic) { - res.FromBits(x.v2.GetBasicACL()) - return +// +// See also [Container.SetBasicACL]. +func (x Container) BasicACL() acl.Basic { + return x.basicACL } // SetPlacementPolicy sets placement policy for the objects within the Container. // NeoFS storage layer strives to follow the specified policy. // -// See also PlacementPolicy. +// See also [Container.PlacementPolicy]. func (x *Container) SetPlacementPolicy(policy netmap.PlacementPolicy) { - var m v2netmap.PlacementPolicy - policy.WriteToV2(&m) - - x.v2.SetPlacementPolicy(&m) + x.policy, x.policySet = policy, true } -// PlacementPolicy returns placement policy set using SetPlacementPolicy. +// PlacementPolicy returns placement policy for the objects within the +// Container. // // Zero Container has no placement policy which is incorrect according to // NeoFS API protocol. -func (x Container) PlacementPolicy() (res netmap.PlacementPolicy) { - m := x.v2.GetPlacementPolicy() - if m != nil { - err := res.ReadFromV2(*m) - if err != nil { - panic(fmt.Sprintf("unexpected error from PlacementPolicy.ReadFromV2: %v", err)) - } +// +// See also [Container.SetPlacementPolicy]. +func (x Container) PlacementPolicy() netmap.PlacementPolicy { + if x.policySet { + return x.policy } + return netmap.PlacementPolicy{} +} - return +func (x *Container) resetAttribute(key string) { + for i := 0; i < len(x.attrs); i++ { // do not use range, slice is changed inside + if x.attrs[i].GetKey() == key { + x.attrs = append(x.attrs[:i], x.attrs[i+1:]...) + i-- + } + } } // SetAttribute sets Container attribute value by key. Both key and value @@ -338,7 +361,7 @@ func (x Container) PlacementPolicy() (res netmap.PlacementPolicy) { // // SetAttribute overwrites existing attribute value. // -// See also Attribute, IterateAttributes. +// See also [Container.Attribute], [Container.IterateAttributes]. 
func (x *Container) SetAttribute(key, value string) { if key == "" { panic("empty attribute key") @@ -346,46 +369,46 @@ func (x *Container) SetAttribute(key, value string) { panic("empty attribute value") } - attrs := x.v2.GetAttributes() - ln := len(attrs) - - for i := 0; i < ln; i++ { - if attrs[i].GetKey() == key { - attrs[i].SetValue(value) + for i := range x.attrs { + if x.attrs[i].GetKey() == key { + x.attrs[i].Value = value return } } - attrs = append(attrs, container.Attribute{}) - attrs[ln].SetKey(key) - attrs[ln].SetValue(value) - - x.v2.SetAttributes(attrs) + x.attrs = append(x.attrs, &container.Container_Attribute{Key: key, Value: value}) } // Attribute reads value of the Container attribute by key. Empty result means // attribute absence. // -// See also SetAttribute, IterateAttributes. +// See also [Container.SetAttribute], [Container.IterateAttributes]. func (x Container) Attribute(key string) string { - attrs := x.v2.GetAttributes() - for i := range attrs { - if attrs[i].GetKey() == key { - return attrs[i].GetValue() + for i := range x.attrs { + if x.attrs[i].GetKey() == key { + return x.attrs[i].GetValue() } } return "" } +// NumberOfAttributes returns number of all attributes specified for this +// Container. +// +// See also [Container.SetAttribute], [Container.IterateAttributes]. +func (x Container) NumberOfAttributes() int { + return len(x.attrs) +} + // IterateAttributes iterates over all Container attributes and passes them // into f. The handler MUST NOT be nil. // -// See also [Container.SetAttribute], [Container.Attribute], [Container.IterateUserAttributes]. +// See also [Container.SetAttribute], [Container.Attribute], +// [Container.NumberOfAttributes], [Container.IterateUserAttributes]. 
func (x Container) IterateAttributes(f func(key, val string)) { - attrs := x.v2.GetAttributes() - for i := range attrs { - f(attrs[i].GetKey(), attrs[i].GetValue()) + for i := range x.attrs { + f(x.attrs[i].GetKey(), x.attrs[i].GetValue()) } } @@ -395,7 +418,7 @@ func (x Container) IterateAttributes(f func(key, val string)) { // See also [Container.SetAttribute], [Container.Attribute], [Container.IterateAttributes]. func (x Container) IterateUserAttributes(f func(key, val string)) { x.IterateAttributes(func(key, val string) { - if !strings.HasPrefix(key, container.SysAttributePrefix) { + if !strings.HasPrefix(key, sysAttributePrefix) { f(key, val) } }) @@ -403,59 +426,66 @@ func (x Container) IterateUserAttributes(f func(key, val string)) { // SetName sets human-readable name of the Container. Name MUST NOT be empty. // -// See also Name. +// See also [Container.Name]. func (x *Container) SetName(name string) { x.SetAttribute(attributeName, name) } -// Name returns container name set using SetName. +// Name returns human-readable container name. // // Zero Container has no name. +// +// See also [Container.SetName]. func (x Container) Name() string { return x.Attribute(attributeName) } // SetCreationTime writes container's creation time in Unix Timestamp format. // -// See also CreatedAt. +// See also [Container.CreatedAt]. func (x *Container) SetCreationTime(t time.Time) { x.SetAttribute(attributeTimestamp, strconv.FormatInt(t.Unix(), 10)) } -// CreatedAt returns container's creation time set using SetCreationTime. +// CreatedAt returns container's creation time in Unix Timestamp format. // // Zero Container has zero timestamp (in seconds). +// +// See also [Container.SetCreationTime]. 
func (x Container) CreatedAt() time.Time { var sec int64 - - attr := x.Attribute(attributeTimestamp) - if attr != "" { + if s := x.Attribute(attributeTimestamp); s != "" { var err error - - sec, err = strconv.ParseInt(x.Attribute(attributeTimestamp), 10, 64) + sec, err = strconv.ParseInt(s, 10, 64) if err != nil { - panic(fmt.Sprintf("parse container timestamp: %v", err)) + panic(fmt.Sprintf("parse timestamp attribute: %v", err)) } } - return time.Unix(sec, 0) } const attributeHomoHashEnabled = "true" -// DisableHomomorphicHashing sets flag to disable homomorphic hashing of the -// Container data. +// SetHomomorphicHashingDisabled sets flag indicating whether homomorphic +// hashing of the Container objects in the network is disabled. // -// See also IsHomomorphicHashingDisabled. -func (x *Container) DisableHomomorphicHashing() { - x.SetAttribute(container.SysAttributeHomomorphicHashing, attributeHomoHashEnabled) +// See also [Container.HomomorphicHashingDisabled]. +func (x *Container) SetHomomorphicHashingDisabled(v bool) { + if v { + x.SetAttribute(sysAttributeDisableHomoHash, attributeHomoHashEnabled) + } else { + x.resetAttribute(sysAttributeDisableHomoHash) + } } -// IsHomomorphicHashingDisabled checks if DisableHomomorphicHashing was called. +// HomomorphicHashingDisabled returns flag indicating whether homomorphic +// hashing of the Container objects in the network is disabled. // // Zero Container has enabled hashing. -func (x Container) IsHomomorphicHashingDisabled() bool { - return x.Attribute(container.SysAttributeHomomorphicHashing) == attributeHomoHashEnabled +// +// See also [Container.SetHomomorphicHashingDisabled]. 
+func (x Container) HomomorphicHashingDisabled() bool { + return x.Attribute(sysAttributeDisableHomoHash) == attributeHomoHashEnabled } // Domain represents information about container domain registered in the NNS @@ -464,98 +494,79 @@ type Domain struct { name, zone string } +const defaultDomainZone = "container" + // SetName sets human-friendly container domain name. +// +// See also [Domain.Name]. func (x *Domain) SetName(name string) { x.name = name } -// Name returns name set using SetName. +// Name returns human-friendly container domain name. // // Zero Domain has zero name. +// +// See also [Domain.SetName]. func (x Domain) Name() string { return x.name } // SetZone sets zone which is used as a TLD of a domain name in NNS contract. +// +// See also [Domain.Zone]. func (x *Domain) SetZone(zone string) { x.zone = zone } -// Zone returns domain zone set using SetZone. +// Zone returns zone which is used as a TLD of a domain name in NNS contract. // // Zero Domain has "container" zone. +// +// See also [Domain.SetZone]. func (x Domain) Zone() string { if x.zone != "" { return x.zone } - - return "container" -} - -// WriteDomain writes Domain into the Container. Name MUST NOT be empty. -func (x *Container) WriteDomain(domain Domain) { - x.SetAttribute(container.SysAttributeName, domain.Name()) - x.SetAttribute(container.SysAttributeZone, domain.Zone()) -} - -// ReadDomain reads Domain from the Container. Returns value with empty name -// if domain is not specified. -func (x Container) ReadDomain() (res Domain) { - name := x.Attribute(container.SysAttributeName) - if name != "" { - res.SetName(name) - res.SetZone(x.Attribute(container.SysAttributeZone)) - } - - return + return defaultDomainZone } -// CalculateSignature calculates signature of the [Container] using provided signer -// and writes it into dst. Signature instance MUST NOT be nil. 
CalculateSignature -// is expected to be called after all the [Container] data is filled and before -// saving the [Container] in the NeoFS network. Note that мany subsequent change -// will most likely break the signature. signer MUST be of -// [neofscrypto.ECDSA_DETERMINISTIC_SHA256] scheme, for example, [neofsecdsa.SignerRFC6979] -// can be used. +// SetDomain specifies Domain associated with the Container. Name MUST NOT be +// empty. // -// See also [Container.VerifySignature], [Container.SignedData]. -// -// Returned errors: -// - [neofscrypto.ErrIncorrectSigner] -func (x Container) CalculateSignature(dst *neofscrypto.Signature, signer neofscrypto.Signer) error { - if signer.Scheme() != neofscrypto.ECDSA_DETERMINISTIC_SHA256 { - return fmt.Errorf("%w: expected ECDSA_DETERMINISTIC_SHA256 scheme", neofscrypto.ErrIncorrectSigner) +// See also [Container.Domain]. +func (x *Container) SetDomain(domain Domain) { + x.SetAttribute(sysAttributeDomainName, domain.Name()) + if domain.zone != "" && domain.zone != defaultDomainZone { + x.SetAttribute(sysAttributeDomainZone, domain.zone) + } else { + x.resetAttribute(sysAttributeDomainZone) } - return dst.Calculate(signer, x.Marshal()) -} - -// VerifySignature verifies Container signature calculated using CalculateSignature. -// Result means signature correctness. -func (x Container) VerifySignature(sig neofscrypto.Signature) bool { - return sig.Verify(x.Marshal()) } -// CalculateID encodes the given Container and passes the result into FromBinary. +// Domain returns Domain associated with the Container. Returns value with empty +// name if domain is not specified. // -// See also Container.Marshal, AssertID. -func (x Container) CalculateID(dst *cid.ID) { - dst.FromBinary(x.Marshal()) +// See also [Container.SetDomain]. 
+func (x Container) Domain() Domain { + var res Domain + name := x.Attribute(sysAttributeDomainName) + if name != "" { + res.SetName(name) + res.SetZone(x.Attribute(sysAttributeDomainZone)) + } + return res } -// AssertID checks if the given Container matches its identifier in CAS of the -// NeoFS containers. -// -// See also CalculateID. -func (x Container) AssertID(id cid.ID) bool { - var id2 cid.ID - x.CalculateID(&id2) - - return id2.Equals(id) +// CalculateID calculates and returns CAS ID for the given container. +func CalculateID(cnr Container) cid.ID { + return sha256.Sum256(cnr.Marshal()) } // Version returns the NeoFS API version this container was created with. func (x Container) Version() version.Version { - var v version.Version - _ = v.ReadFromV2(*x.v2.GetVersion()) // No, this can't fail for x. - return v + if x.versionSet { + return x.version + } + return version.Version{} } diff --git a/container/container_internal_test.go b/container/container_internal_test.go deleted file mode 100644 index e59eba937..000000000 --- a/container/container_internal_test.go +++ /dev/null @@ -1,142 +0,0 @@ -package container - -import ( - "bytes" - "math/rand" - "testing" - - "github.com/nspcc-dev/neofs-sdk-go/container/acl" - "github.com/nspcc-dev/neofs-sdk-go/netmap" - usertest "github.com/nspcc-dev/neofs-sdk-go/user/test" - "github.com/stretchr/testify/require" -) - -func TestContainer_CopyTo(t *testing.T) { - owner := usertest.ID(t) - - var container Container - container.Init() - - attrOne := "a0" - attrValue := "a1" - - container.SetOwner(owner) - container.SetBasicACL(acl.PublicRWExtended) - container.SetAttribute(attrOne, attrValue) - - var pp netmap.PlacementPolicy - pp.SetContainerBackupFactor(123) - - var rd netmap.ReplicaDescriptor - rd.SetSelectorName("selector") - rd.SetNumberOfObjects(100) - pp.SetReplicas([]netmap.ReplicaDescriptor{rd}) - - var f netmap.Filter - f.SetName("filter") - pp.SetFilters([]netmap.Filter{f}) - - var s netmap.Selector - 
s.SetName("selector") - pp.SetSelectors([]netmap.Selector{s}) - - container.SetPlacementPolicy(pp) - - t.Run("copy", func(t *testing.T) { - var dst Container - container.CopyTo(&dst) - - require.Equal(t, container, dst) - require.True(t, bytes.Equal(container.Marshal(), dst.Marshal())) - }) - - t.Run("change acl", func(t *testing.T) { - var dst Container - container.CopyTo(&dst) - - require.Equal(t, container.BasicACL(), dst.BasicACL()) - dst.SetBasicACL(acl.Private) - require.NotEqual(t, container.BasicACL(), dst.BasicACL()) - }) - - t.Run("change owner", func(t *testing.T) { - var dst Container - container.CopyTo(&dst) - - require.True(t, container.Owner().Equals(dst.Owner())) - - newOwner := usertest.ID(t) - dst.v2.GetOwnerID().SetValue(newOwner.WalletBytes()) - - require.False(t, container.Owner().Equals(dst.Owner())) - }) - - t.Run("replace owner", func(t *testing.T) { - var dst Container - container.CopyTo(&dst) - - require.True(t, container.Owner().Equals(dst.Owner())) - - newOwner := usertest.ID(t) - dst.SetOwner(newOwner) - - require.False(t, container.Owner().Equals(dst.Owner())) - }) - - t.Run("change nonce", func(t *testing.T) { - var dst Container - container.CopyTo(&dst) - - require.True(t, bytes.Equal(container.v2.GetNonce(), dst.v2.GetNonce())) - dst.v2.SetNonce([]byte{1, 2, 3}) - require.False(t, bytes.Equal(container.v2.GetNonce(), dst.v2.GetNonce())) - }) - - t.Run("overwrite nonce", func(t *testing.T) { - var local Container - require.Empty(t, local.v2.GetNonce()) - - var dst Container - dst.v2.SetNonce([]byte{1, 2, 3}) - require.NotEmpty(t, dst.v2.GetNonce()) - - local.CopyTo(&dst) - require.True(t, bytes.Equal(local.Marshal(), dst.Marshal())) - - require.Empty(t, local.v2.GetNonce()) - require.Empty(t, dst.v2.GetNonce()) - - require.True(t, bytes.Equal(local.v2.GetNonce(), dst.v2.GetNonce())) - dst.v2.SetNonce([]byte{1, 2, 3}) - require.False(t, bytes.Equal(local.v2.GetNonce(), dst.v2.GetNonce())) - }) - - t.Run("change version", func(t 
*testing.T) { - var dst Container - container.CopyTo(&dst) - - oldVer := container.v2.GetVersion() - require.NotNil(t, oldVer) - - newVer := dst.v2.GetVersion() - require.NotNil(t, newVer) - - require.Equal(t, oldVer.GetMajor(), newVer.GetMajor()) - require.Equal(t, oldVer.GetMinor(), newVer.GetMinor()) - - newVer.SetMajor(rand.Uint32()) - newVer.SetMinor(rand.Uint32()) - - require.NotEqual(t, oldVer.GetMajor(), newVer.GetMajor()) - require.NotEqual(t, oldVer.GetMinor(), newVer.GetMinor()) - }) - - t.Run("change attributes", func(t *testing.T) { - var dst Container - container.CopyTo(&dst) - - require.Equal(t, container.Attribute(attrOne), dst.Attribute(attrOne)) - dst.SetAttribute(attrOne, "value") - require.NotEqual(t, container.Attribute(attrOne), dst.Attribute(attrOne)) - }) -} diff --git a/container/container_test.go b/container/container_test.go index 440b37987..f048d40de 100644 --- a/container/container_test.go +++ b/container/container_test.go @@ -6,352 +6,1031 @@ import ( "testing" "time" - "github.com/google/uuid" - v2container "github.com/nspcc-dev/neofs-api-go/v2/container" - v2netmap "github.com/nspcc-dev/neofs-api-go/v2/netmap" - "github.com/nspcc-dev/neofs-api-go/v2/refs" + apicontainer "github.com/nspcc-dev/neofs-sdk-go/api/container" + apinetmap "github.com/nspcc-dev/neofs-sdk-go/api/netmap" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" "github.com/nspcc-dev/neofs-sdk-go/container" - cid "github.com/nspcc-dev/neofs-sdk-go/container/id" - cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" containertest "github.com/nspcc-dev/neofs-sdk-go/container/test" - neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" - "github.com/nspcc-dev/neofs-sdk-go/crypto/test" + "github.com/nspcc-dev/neofs-sdk-go/netmap" netmaptest "github.com/nspcc-dev/neofs-sdk-go/netmap/test" usertest "github.com/nspcc-dev/neofs-sdk-go/user/test" - "github.com/nspcc-dev/neofs-sdk-go/version" "github.com/stretchr/testify/require" + 
"google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" ) -func TestPlacementPolicyEncoding(t *testing.T) { - v := containertest.Container(t) - - t.Run("binary", func(t *testing.T) { - var v2 container.Container - require.NoError(t, v2.Unmarshal(v.Marshal())) +func TestCalculateID(t *testing.T) { + b := containertest.Container().Marshal() + var c container.Container + require.NoError(t, c.Unmarshal(b)) + require.EqualValues(t, sha256.Sum256(b), container.CalculateID(c)) +} - require.Equal(t, v, v2) +func TestContainerDecoding(t *testing.T) { + t.Run("missing fields", func(t *testing.T) { + for _, testCase := range []struct { + name, err string + corrupt func(*apicontainer.Container) + }{ + {name: "version", err: "missing version", corrupt: func(c *apicontainer.Container) { + c.Version = nil + }}, + {name: "owner", err: "missing owner", corrupt: func(c *apicontainer.Container) { + c.OwnerId = nil + }}, + {name: "nil nonce", err: "missing nonce", corrupt: func(c *apicontainer.Container) { + c.Nonce = nil + }}, + {name: "empty nonce", err: "missing nonce", corrupt: func(c *apicontainer.Container) { + c.Nonce = []byte{} + }}, + {name: "policy", err: "missing placement policy", corrupt: func(c *apicontainer.Container) { + c.PlacementPolicy = nil + }}, + } { + t.Run(testCase.name, func(t *testing.T) { + src := containertest.Container() + var dst container.Container + var m apicontainer.Container + + src.WriteToV2(&m) + testCase.corrupt(&m) + require.ErrorContains(t, dst.ReadFromV2(&m), testCase.err) + + b, err := proto.Marshal(&m) + require.NoError(t, err) + require.NoError(t, dst.Unmarshal(b)) + + j, err := protojson.Marshal(&m) + require.NoError(t, err) + require.NoError(t, dst.UnmarshalJSON(j)) + }) + } }) - - t.Run("json", func(t *testing.T) { - data, err := v.MarshalJSON() - require.NoError(t, err) - - var v2 container.Container - require.NoError(t, v2.UnmarshalJSON(data)) - - require.Equal(t, v, v2) + t.Run("invalid fields", func(t 
*testing.T) { + for _, testCase := range []struct { + name, err string + corrupt func(*apicontainer.Container) + }{ + {name: "owner/nil value", err: "invalid owner: missing value", corrupt: func(c *apicontainer.Container) { + c.OwnerId.Value = nil + }}, + {name: "owner/empty value", err: "invalid owner: missing value", corrupt: func(c *apicontainer.Container) { + c.OwnerId.Value = []byte{} + }}, + {name: "owner/wrong length", err: "invalid owner: invalid value length 24", corrupt: func(c *apicontainer.Container) { + c.OwnerId.Value = make([]byte, 24) + }}, + {name: "owner/wrong prefix", err: "invalid owner: invalid prefix byte 0x34, expected 0x35", corrupt: func(c *apicontainer.Container) { + c.OwnerId.Value[0] = 0x34 + }}, + {name: "owner/checksum mismatch", err: "invalid owner: value checksum mismatch", corrupt: func(c *apicontainer.Container) { + c.OwnerId.Value[24]++ + }}, + {name: "nonce/wrong length", err: "invalid nonce: invalid UUID (got 15 bytes)", corrupt: func(c *apicontainer.Container) { + c.Nonce = make([]byte, 15) + }}, + {name: "nonce/wrong version", err: "invalid nonce: wrong UUID version 3", corrupt: func(c *apicontainer.Container) { + c.Nonce[6] = 3 << 4 + }}, + {name: "nonce/nil replicas", err: "invalid placement policy: missing replicas", corrupt: func(c *apicontainer.Container) { + c.PlacementPolicy.Replicas = nil + }}, + {name: "attributes/empty key", err: "invalid attribute #1: missing key", corrupt: func(c *apicontainer.Container) { + c.Attributes = []*apicontainer.Container_Attribute{ + {Key: "key_valid", Value: "any"}, + {Key: "", Value: "any"}, + } + }}, + {name: "attributes/repeated keys", err: "multiple attributes with key=k2", corrupt: func(c *apicontainer.Container) { + c.Attributes = []*apicontainer.Container_Attribute{ + {Key: "k1", Value: "any"}, + {Key: "k2", Value: "1"}, + {Key: "k3", Value: "any"}, + {Key: "k2", Value: "2"}, + } + }}, + {name: "attributes/empty value", err: "invalid attribute #1 (key2): missing value", corrupt: 
func(c *apicontainer.Container) { + c.Attributes = []*apicontainer.Container_Attribute{ + {Key: "key1", Value: "any"}, + {Key: "key2", Value: ""}, + } + }}, + {name: "attributes/invalid timestamp", err: "invalid timestamp attribute (#1): invalid integer", corrupt: func(c *apicontainer.Container) { + c.Attributes = []*apicontainer.Container_Attribute{ + {Key: "key1", Value: "any"}, + {Key: "Timestamp", Value: "not_a_number"}, + } + }}, + } { + t.Run(testCase.name, func(t *testing.T) { + src := containertest.Container() + var dst container.Container + var m apicontainer.Container + + src.WriteToV2(&m) + testCase.corrupt(&m) + require.ErrorContains(t, dst.ReadFromV2(&m), testCase.err) + + b, err := proto.Marshal(&m) + require.NoError(t, err) + require.ErrorContains(t, dst.Unmarshal(b), testCase.err) + + j, err := protojson.Marshal(&m) + require.NoError(t, err) + require.ErrorContains(t, dst.UnmarshalJSON(j), testCase.err) + }) + } }) } -func TestContainer_Init(t *testing.T) { - val := containertest.Container(t) - - val.Init() - - var ver = val.Version() - require.Equal(t, version.Current(), ver) - - var msg v2container.Container - val.WriteToV2(&msg) - - binNonce := msg.GetNonce() - - var nonce uuid.UUID - require.NoError(t, nonce.UnmarshalBinary(binNonce)) - require.EqualValues(t, 4, nonce.Version()) - - verV2 := msg.GetVersion() - require.NotNil(t, verV2) - - require.NoError(t, ver.ReadFromV2(*verV2)) - - require.Equal(t, version.Current(), ver) - - var val2 container.Container - require.NoError(t, val2.ReadFromV2(msg)) - - require.Equal(t, val, val2) +func TestContainer_Unmarshal(t *testing.T) { + t.Run("invalid binary", func(t *testing.T) { + var c container.Container + msg := []byte("definitely_not_protobuf") + err := c.Unmarshal(msg) + require.ErrorContains(t, err, "decode protobuf") + }) } -func TestContainer_Owner(t *testing.T) { - var val container.Container - - require.Zero(t, val.Owner()) - - val = containertest.Container(t) - - owner := usertest.ID(t) - - 
val.SetOwner(owner) - - var msg v2container.Container - val.WriteToV2(&msg) - - var msgOwner refs.OwnerID - owner.WriteToV2(&msgOwner) - - require.Equal(t, &msgOwner, msg.GetOwnerID()) - - var val2 container.Container - require.NoError(t, val2.ReadFromV2(msg)) +func TestContainer_UnmarshalJSON(t *testing.T) { + t.Run("invalid json", func(t *testing.T) { + var c container.Container + msg := []byte("definitely_not_protojson") + err := c.UnmarshalJSON(msg) + require.ErrorContains(t, err, "decode protojson") + }) +} - require.True(t, val2.Owner().Equals(owner)) +func TestContainer_CopyTo(t *testing.T) { + src := containertest.Container() + pp := src.PlacementPolicy() + if len(pp.Replicas()) < 2 { + pp.SetReplicas(netmaptest.NReplicas(2)) + } + if len(pp.Filters()) < 2 { + pp.SetFilters(netmaptest.NFilters(2)) + } + if len(pp.Selectors()) < 2 { + pp.SetSelectors(netmaptest.NSelectors(2)) + } + src.SetPlacementPolicy(pp) + const attr = "any_attr" + src.SetAttribute(attr, "0") + + shallow := src + deep := containertest.Container() + src.CopyTo(&deep) + require.Equal(t, src, deep) + + shallow.SetAttribute(attr, "1") + require.Equal(t, "1", shallow.Attribute(attr)) + require.Equal(t, "1", src.Attribute(attr)) + + deep.SetAttribute(attr, "2") + require.Equal(t, "2", deep.Attribute(attr)) + require.Equal(t, "1", src.Attribute(attr)) + + rs := src.PlacementPolicy().Replicas() + originNum := rs[1].NumberOfObjects() + rs[1].SetNumberOfObjects(originNum + 1) + require.EqualValues(t, originNum+1, src.PlacementPolicy().Replicas()[1].NumberOfObjects()) + require.EqualValues(t, originNum+1, shallow.PlacementPolicy().Replicas()[1].NumberOfObjects()) + require.EqualValues(t, originNum, deep.PlacementPolicy().Replicas()[1].NumberOfObjects()) + + fs := src.PlacementPolicy().Filters() + originName := fs[1].Name() + fs[1].SetName(originName + "_extra") + require.EqualValues(t, originName+"_extra", src.PlacementPolicy().Filters()[1].Name()) + require.EqualValues(t, originName+"_extra", 
shallow.PlacementPolicy().Filters()[1].Name()) + require.EqualValues(t, originName, deep.PlacementPolicy().Filters()[1].Name()) + + ss := src.PlacementPolicy().Selectors() + originName = ss[1].Name() + ss[1].SetName(originName + "_extra") + require.EqualValues(t, originName+"_extra", src.PlacementPolicy().Selectors()[1].Name()) + require.EqualValues(t, originName+"_extra", shallow.PlacementPolicy().Selectors()[1].Name()) + require.EqualValues(t, originName, deep.PlacementPolicy().Selectors()[1].Name()) } -func TestContainer_BasicACL(t *testing.T) { - var val container.Container +func assertPolicyAPIEncoding(t testing.TB, policy netmap.PlacementPolicy, msg *apinetmap.PlacementPolicy) { + require.EqualValues(t, policy.ContainerBackupFactor(), msg.ContainerBackupFactor) + + if rs := policy.Replicas(); len(rs) > 0 { + require.Len(t, msg.Replicas, len(rs)) + for i := range rs { + require.EqualValues(t, rs[i].NumberOfObjects(), msg.Replicas[i].Count) + require.Equal(t, rs[i].SelectorName(), msg.Replicas[i].Selector) + } + } else { + require.Zero(t, msg.Replicas) + } - require.Zero(t, val.BasicACL()) + var assertFilters func(fs []netmap.Filter, m []*apinetmap.Filter) + assertFilters = func(fs []netmap.Filter, m []*apinetmap.Filter) { + if len(fs) > 0 { + require.Len(t, m, len(fs)) + for i := range fs { + require.Equal(t, fs[i].Name(), m[i].Name) + require.Equal(t, fs[i].Key(), m[i].Key) + require.EqualValues(t, fs[i].Op(), m[i].Op) + require.Equal(t, fs[i].Value(), m[i].Value) + assertFilters(fs[i].SubFilters(), m[i].Filters) + } + } else { + require.Zero(t, m) + } + } - val = containertest.Container(t) + assertFilters(policy.Filters(), msg.Filters) + if ss := policy.Selectors(); len(ss) > 0 { + require.Len(t, msg.Selectors, len(ss)) + for i := range ss { + require.Equal(t, ss[i].Name(), msg.Selectors[i].Name) + require.EqualValues(t, ss[i].NumberOfNodes(), msg.Selectors[i].Count) + switch { + default: + require.Zero(t, msg.Selectors[i].Clause) + case ss[i].IsSame(): + 
require.EqualValues(t, apinetmap.Clause_SAME, msg.Selectors[i].Clause) + case ss[i].IsDistinct(): + require.EqualValues(t, apinetmap.Clause_DISTINCT, msg.Selectors[i].Clause) + } + } + } else { + require.Zero(t, msg.Selectors) + } +} +func TestNew(t *testing.T) { + owner := usertest.ID() basicACL := containertest.BasicACL() - val.SetBasicACL(basicACL) - - var msg v2container.Container - val.WriteToV2(&msg) - - require.EqualValues(t, basicACL.Bits(), msg.GetBasicACL()) - - var val2 container.Container - require.NoError(t, val2.ReadFromV2(msg)) - - require.Equal(t, basicACL, val2.BasicACL()) + policy := netmaptest.PlacementPolicy() + + c := container.New(owner, basicACL, policy) + require.Equal(t, owner, c.Owner()) + require.Equal(t, basicACL, c.BasicACL()) + require.Equal(t, policy, c.PlacementPolicy()) + + assertFields := func(t testing.TB, cnr container.Container) { + require.Empty(t, cnr.Name()) + require.Empty(t, cnr.Domain().Name()) + require.Zero(t, cnr.NumberOfAttributes()) + require.Zero(t, cnr.CreatedAt().Unix()) + require.False(t, cnr.HomomorphicHashingDisabled()) + require.EqualValues(t, 2, cnr.Version().Major()) + require.EqualValues(t, 16, cnr.Version().Minor()) + + called := false + f := func(key, val string) { + called = true + } + cnr.IterateAttributes(f) + require.False(t, called) + cnr.IterateUserAttributes(f) + require.False(t, called) + + netCfg := netmaptest.NetworkInfo() + netCfg.SetHomomorphicHashingDisabled(true) + require.False(t, cnr.AssertNetworkConfig(netCfg)) + netCfg.SetHomomorphicHashingDisabled(false) + require.True(t, cnr.AssertNetworkConfig(netCfg)) + } + assertFields(t, c) + + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + src := container.New(owner, basicACL, policy) + dst := containertest.Container() + + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + assertFields(t, dst) + }) + t.Run("api", func(t *testing.T) { + src := container.New(owner, basicACL, policy) + dst := 
containertest.Container() + var msg apicontainer.Container + + src.WriteToV2(&msg) + require.Equal(t, &refs.Version{Major: 2, Minor: 16}, msg.Version) + require.Equal(t, &refs.OwnerID{Value: owner[:]}, msg.OwnerId) + require.Len(t, msg.Nonce, 16) + require.EqualValues(t, 4, msg.Nonce[6]>>4) + require.EqualValues(t, basicACL, msg.BasicAcl) + require.Zero(t, msg.Attributes) + assertPolicyAPIEncoding(t, policy, msg.PlacementPolicy) + require.NoError(t, dst.ReadFromV2(&msg)) + assertFields(t, dst) + }) + t.Run("json", func(t *testing.T) { + src := container.New(owner, basicACL, policy) + dst := containertest.Container() + + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + assertFields(t, dst) + }) + }) } -func TestContainer_PlacementPolicy(t *testing.T) { - var val container.Container - - require.Zero(t, val.PlacementPolicy()) - - val = containertest.Container(t) - - pp := netmaptest.PlacementPolicy() - val.SetPlacementPolicy(pp) - - var msgPolicy v2netmap.PlacementPolicy - pp.WriteToV2(&msgPolicy) - - var msg v2container.Container - val.WriteToV2(&msg) +func TestContainer_SetOwner(t *testing.T) { + var c container.Container + + require.Zero(t, c.Owner()) + + usr := usertest.ID() + c.SetOwner(usr) + require.Equal(t, usr, c.Owner()) + + usrOther := usertest.ChangeID(usr) + c.SetOwner(usrOther) + require.Equal(t, usrOther, c.Owner()) + + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst container.Container + + dst.SetOwner(usr) + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.Zero(t, dst.Owner()) + + src.SetOwner(usr) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.Equal(t, usr, dst.Owner()) + }) + t.Run("api", func(t *testing.T) { + src := container.New(usr, containertest.BasicACL(), netmaptest.PlacementPolicy()) + var dst container.Container + var msg apicontainer.Container + + dst.SetOwner(usrOther) + 
src.SetOwner(usr) + src.WriteToV2(&msg) + require.Equal(t, &refs.OwnerID{Value: usr[:]}, msg.OwnerId) + require.NoError(t, dst.ReadFromV2(&msg)) + require.Equal(t, usr, dst.Owner()) + }) + t.Run("json", func(t *testing.T) { + var src, dst container.Container + + dst.SetOwner(usr) + + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.Zero(t, dst.Owner()) + + src.SetOwner(usr) + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.Equal(t, usr, dst.Owner()) + }) + }) +} - require.Equal(t, &msgPolicy, msg.GetPlacementPolicy()) +func TestContainer_SetBasicACL(t *testing.T) { + var c container.Container - var val2 container.Container - require.NoError(t, val2.ReadFromV2(msg)) + require.Zero(t, c.BasicACL()) - require.Equal(t, pp, val2.PlacementPolicy()) + basicACL := containertest.BasicACL() + c.SetBasicACL(basicACL) + require.Equal(t, basicACL, c.BasicACL()) + + basicACLOther := containertest.BasicACL() + c.SetBasicACL(basicACLOther) + require.Equal(t, basicACLOther, c.BasicACL()) + + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst container.Container + + dst.SetBasicACL(basicACL) + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.Zero(t, dst.BasicACL()) + + src.SetBasicACL(basicACL) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.Equal(t, basicACL, dst.BasicACL()) + }) + t.Run("api", func(t *testing.T) { + src := container.New(usertest.ID(), basicACL, netmaptest.PlacementPolicy()) + var dst container.Container + var msg apicontainer.Container + + src.SetBasicACL(basicACL) + src.WriteToV2(&msg) + require.EqualValues(t, basicACL, msg.BasicAcl) + require.NoError(t, dst.ReadFromV2(&msg)) + require.Equal(t, basicACL, dst.BasicACL()) + }) + t.Run("json", func(t *testing.T) { + var src, dst container.Container + + dst.SetBasicACL(basicACL) + + j, err 
:= src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.Zero(t, dst.BasicACL()) + + src.SetBasicACL(basicACL) + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.Equal(t, basicACL, dst.BasicACL()) + }) + }) } -func assertContainsAttribute(t *testing.T, m v2container.Container, key, val string) { - var msgAttr v2container.Attribute - - msgAttr.SetKey(key) - msgAttr.SetValue(val) - require.Contains(t, m.GetAttributes(), msgAttr) +func TestContainer_SetPlacementPolicy(t *testing.T) { + var c container.Container + + require.Zero(t, c.PlacementPolicy()) + + policy := netmaptest.PlacementPolicy() + c.SetPlacementPolicy(policy) + require.Equal(t, policy, c.PlacementPolicy()) + + policyOther := netmaptest.PlacementPolicy() + c.SetPlacementPolicy(policyOther) + require.Equal(t, policyOther, c.PlacementPolicy()) + + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst container.Container + + dst.SetPlacementPolicy(policy) + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.Zero(t, dst.PlacementPolicy()) + + src.SetPlacementPolicy(policy) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.Equal(t, policy, dst.PlacementPolicy()) + }) + t.Run("api", func(t *testing.T) { + src := container.New(usertest.ID(), containertest.BasicACL(), policy) + var dst container.Container + var msg apicontainer.Container + + src.SetPlacementPolicy(policy) + src.WriteToV2(&msg) + assertPolicyAPIEncoding(t, policy, msg.PlacementPolicy) + require.NoError(t, dst.ReadFromV2(&msg)) + require.Equal(t, policy, dst.PlacementPolicy()) + }) + t.Run("json", func(t *testing.T) { + var src, dst container.Container + + dst.SetPlacementPolicy(policy) + + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.Zero(t, dst.PlacementPolicy()) + + 
src.SetPlacementPolicy(policy) + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.Equal(t, policy, dst.PlacementPolicy()) + }) + }) } -func TestContainer_Attribute(t *testing.T) { - const attrKey1, attrKey2 = "key1", "key2" - const attrVal1, attrVal2 = "val1", "val2" - - val := containertest.Container(t) - - val.SetAttribute(attrKey1, attrVal1) - val.SetAttribute(attrKey2, attrVal2) - - var msg v2container.Container - val.WriteToV2(&msg) - - require.GreaterOrEqual(t, len(msg.GetAttributes()), 2) - assertContainsAttribute(t, msg, attrKey1, attrVal1) - assertContainsAttribute(t, msg, attrKey2, attrVal2) - - var val2 container.Container - require.NoError(t, val2.ReadFromV2(msg)) - - require.Equal(t, attrVal1, val2.Attribute(attrKey1)) - require.Equal(t, attrVal2, val2.Attribute(attrKey2)) - - m := map[string]string{} - - val2.IterateAttributes(func(key, val string) { - m[key] = val +func collectContainerAttributes(c container.Container) [][2]string { + var res [][2]string + c.IterateAttributes(func(key, value string) { + res = append(res, [2]string{key, value}) }) - - require.GreaterOrEqual(t, len(m), 2) - require.Equal(t, attrVal1, m[attrKey1]) - require.Equal(t, attrVal2, m[attrKey2]) - - val2.SetAttribute(attrKey1, attrVal1+"_") - require.Equal(t, attrVal1+"_", val2.Attribute(attrKey1)) + return res } -func TestContainer_IterateUserAttributes(t *testing.T) { - var cnr container.Container - mSys := make(map[string]string) - mUsr := make(map[string]string) - - for i := 0; i < 10; i++ { - si := strconv.Itoa(i) - - keyUsr := "key" + si - valUsr := "val" + si - keySys := "__NEOFS__" + si - valSys := "sys-val" + si - - mUsr[keyUsr] = valUsr - mSys[keySys] = valSys - - cnr.SetAttribute(keySys, valSys) - cnr.SetAttribute(keyUsr, valUsr) - } - - cnr.IterateUserAttributes(func(key, val string) { - _, isSys := mSys[key] - require.False(t, isSys, key) - require.Equal(t, mUsr[key], val, key) - delete(mUsr, 
key) +func collectContainerUserAttributes(c container.Container) [][2]string { + var res [][2]string + c.IterateUserAttributes(func(key, value string) { + res = append(res, [2]string{key, value}) }) - - require.Empty(t, mUsr) + return res } -func TestSetName(t *testing.T) { - var val container.Container - - require.Panics(t, func() { - val.SetName("") +func TestContainer_SetAttribute(t *testing.T) { + var c container.Container + require.Panics(t, func() { c.SetAttribute("", "") }) + require.Panics(t, func() { c.SetAttribute("", "val") }) + require.Panics(t, func() { c.SetAttribute("key", "") }) + + const key1, val1 = "some_key1", "some_value1" + const key2, val2 = "some_key2", "some_value2" + + require.Zero(t, c.Attribute(key1)) + require.Zero(t, c.Attribute(key2)) + require.Zero(t, c.NumberOfAttributes()) + require.Zero(t, collectContainerAttributes(c)) + + c.SetAttribute(key1, val1) + c.SetAttribute(key2, val2) + require.Equal(t, val1, c.Attribute(key1)) + require.Equal(t, val2, c.Attribute(key2)) + require.EqualValues(t, 2, c.NumberOfAttributes()) + attrs := collectContainerAttributes(c) + require.Len(t, attrs, 2) + require.Contains(t, attrs, [2]string{key1, val1}) + require.Contains(t, attrs, [2]string{key2, val2}) + + c.SetAttribute(key1, val2) + c.SetAttribute(key2, val1) + require.Equal(t, val2, c.Attribute(key1)) + require.Equal(t, val1, c.Attribute(key2)) + attrs = collectContainerAttributes(c) + require.Len(t, attrs, 2) + require.Contains(t, attrs, [2]string{key1, val2}) + require.Contains(t, attrs, [2]string{key2, val1}) + + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst container.Container + + dst.SetAttribute(key1+key2, val1+val2) + + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.Zero(t, dst.Attribute(key1)) + require.Zero(t, dst.Attribute(key2)) + require.Zero(t, dst.NumberOfAttributes()) + require.Zero(t, collectContainerAttributes(dst)) + + src.SetAttribute(key1, val1) + 
src.SetAttribute(key2, val2) + + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.Equal(t, val1, dst.Attribute(key1)) + require.Equal(t, val2, dst.Attribute(key2)) + require.EqualValues(t, 2, dst.NumberOfAttributes()) + attrs := collectContainerAttributes(dst) + require.Len(t, attrs, 2) + require.Contains(t, attrs, [2]string{key1, val1}) + require.Contains(t, attrs, [2]string{key2, val2}) + }) + t.Run("api", func(t *testing.T) { + src := container.New(usertest.ID(), containertest.BasicACL(), netmaptest.PlacementPolicy()) + var dst container.Container + var msg apicontainer.Container + + dst.SetAttribute(key1, val1) + + src.WriteToV2(&msg) + require.Zero(t, msg.Attributes) + require.NoError(t, dst.ReadFromV2(&msg)) + require.Zero(t, dst.Attribute(key1)) + require.Zero(t, dst.Attribute(key2)) + require.Zero(t, dst.NumberOfAttributes()) + require.Zero(t, collectContainerAttributes(dst)) + + src.SetAttribute(key1, val1) + src.SetAttribute(key2, val2) + + src.WriteToV2(&msg) + require.Equal(t, []*apicontainer.Container_Attribute{ + {Key: key1, Value: val1}, + {Key: key2, Value: val2}, + }, msg.Attributes) + + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + require.Equal(t, val1, dst.Attribute(key1)) + require.Equal(t, val2, dst.Attribute(key2)) + require.EqualValues(t, 2, dst.NumberOfAttributes()) + attrs := collectContainerAttributes(dst) + require.Len(t, attrs, 2) + require.Contains(t, attrs, [2]string{key1, val1}) + require.Contains(t, attrs, [2]string{key2, val2}) + }) + t.Run("json", func(t *testing.T) { + var src, dst container.Container + + dst.SetAttribute(key1, val1) + + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.Zero(t, dst.Attribute(key1)) + require.Zero(t, dst.Attribute(key2)) + require.Zero(t, dst.NumberOfAttributes()) + require.Zero(t, collectContainerAttributes(dst)) + + src.SetAttribute(key1, val1) + src.SetAttribute(key2, val2) + + j, err = 
src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.Equal(t, val1, dst.Attribute(key1)) + require.Equal(t, val2, dst.Attribute(key2)) + require.EqualValues(t, 2, dst.NumberOfAttributes()) + attrs := collectContainerAttributes(dst) + require.Len(t, attrs, 2) + require.Contains(t, attrs, [2]string{key1, val1}) + require.Contains(t, attrs, [2]string{key2, val2}) + }) }) - - val = containertest.Container(t) - - const name = "some name" - - val.SetName(name) - - var msg v2container.Container - val.WriteToV2(&msg) - - assertContainsAttribute(t, msg, "Name", name) - - var val2 container.Container - require.NoError(t, val2.ReadFromV2(msg)) - - require.Equal(t, name, val2.Name()) } -func TestSetCreationTime(t *testing.T) { - var val container.Container - - require.Zero(t, val.CreatedAt().Unix()) +func TestContainer_SetName(t *testing.T) { + var c container.Container - val = containertest.Container(t) - - creat := time.Now() - - val.SetCreationTime(creat) - - var msg v2container.Container - val.WriteToV2(&msg) - - assertContainsAttribute(t, msg, "Timestamp", strconv.FormatInt(creat.Unix(), 10)) - - var val2 container.Container - require.NoError(t, val2.ReadFromV2(msg)) + assertZeroName := func(t testing.TB, c container.Container) { + require.Zero(t, c.Name()) + require.Zero(t, c.Attribute("Name")) + require.Zero(t, c.NumberOfAttributes()) + require.Zero(t, collectContainerAttributes(c)) + require.Zero(t, collectContainerUserAttributes(c)) + } + assertName := func(t testing.TB, c container.Container, name string) { + require.Equal(t, name, c.Name()) + require.Equal(t, name, c.Attribute("Name")) + require.EqualValues(t, 1, c.NumberOfAttributes()) + require.Equal(t, [][2]string{{"Name", name}}, collectContainerAttributes(c)) + require.Equal(t, [][2]string{{"Name", name}}, collectContainerUserAttributes(c)) + } - require.Equal(t, creat.Unix(), val2.CreatedAt().Unix()) + assertZeroName(t, c) + + name := "any_name" + 
c.SetName(name) + assertName(t, c, name) + + nameOther := name + "_extra" + c.SetName(nameOther) + assertName(t, c, nameOther) + + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst container.Container + + dst.SetName(name) + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + assertZeroName(t, dst) + + src.SetName(name) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + assertName(t, dst, name) + }) + t.Run("api", func(t *testing.T) { + src := container.New(usertest.ID(), containertest.BasicACL(), netmaptest.PlacementPolicy()) + var dst container.Container + var msg apicontainer.Container + + dst.SetName(name) + src.WriteToV2(&msg) + require.Zero(t, msg.Attributes) + require.NoError(t, dst.ReadFromV2(&msg)) + assertZeroName(t, dst) + + src.SetName(name) + src.WriteToV2(&msg) + require.Equal(t, []*apicontainer.Container_Attribute{ + {Key: "Name", Value: name}, + }, msg.Attributes) + require.NoError(t, dst.ReadFromV2(&msg)) + assertName(t, dst, name) + }) + t.Run("json", func(t *testing.T) { + var src, dst container.Container + + dst.SetName(name) + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + assertZeroName(t, dst) + + src.SetName(name) + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + assertName(t, dst, name) + }) + }) } -func TestDisableHomomorphicHashing(t *testing.T) { - var val container.Container +func TestContainer_SetCreationTime(t *testing.T) { + var c container.Container - require.False(t, val.IsHomomorphicHashingDisabled()) - - val = containertest.Container(t) - - val.DisableHomomorphicHashing() - - var msg v2container.Container - val.WriteToV2(&msg) - - assertContainsAttribute(t, msg, v2container.SysAttributePrefix+"DISABLE_HOMOMORPHIC_HASHING", "true") - - var val2 container.Container - require.NoError(t, val2.ReadFromV2(msg)) + assertZeroCreationTime := 
func(t testing.TB, c container.Container) { + require.Zero(t, c.CreatedAt().Unix()) + require.Zero(t, c.Attribute("Timestamp")) + require.Zero(t, c.NumberOfAttributes()) + require.Zero(t, collectContainerAttributes(c)) + require.Zero(t, collectContainerUserAttributes(c)) + } + assertCreationTime := func(t testing.TB, c container.Container, tm time.Time) { + require.Equal(t, tm.Unix(), c.CreatedAt().Unix()) + require.Equal(t, strconv.FormatInt(tm.Unix(), 10), c.Attribute("Timestamp")) + require.EqualValues(t, 1, c.NumberOfAttributes()) + require.Equal(t, [][2]string{{"Timestamp", strconv.FormatInt(tm.Unix(), 10)}}, collectContainerAttributes(c)) + require.Equal(t, [][2]string{{"Timestamp", strconv.FormatInt(tm.Unix(), 10)}}, collectContainerUserAttributes(c)) + } - require.True(t, val2.IsHomomorphicHashingDisabled()) + tm := time.Now() + c.SetCreationTime(tm) + assertCreationTime(t, c, tm) + + tmOther := tm.Add(time.Minute) + c.SetCreationTime(tmOther) + assertCreationTime(t, c, tmOther) + + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst container.Container + + dst.SetCreationTime(tm) + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + assertZeroCreationTime(t, dst) + + src.SetCreationTime(tm) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + assertCreationTime(t, dst, tm) + }) + t.Run("api", func(t *testing.T) { + src := container.New(usertest.ID(), containertest.BasicACL(), netmaptest.PlacementPolicy()) + var dst container.Container + var msg apicontainer.Container + + dst.SetCreationTime(tm) + src.WriteToV2(&msg) + require.Zero(t, msg.Attributes) + require.NoError(t, dst.ReadFromV2(&msg)) + assertZeroCreationTime(t, dst) + + src.SetCreationTime(tm) + src.WriteToV2(&msg) + require.Equal(t, []*apicontainer.Container_Attribute{{ + Key: "Timestamp", Value: strconv.FormatInt(tm.Unix(), 10), + }}, msg.Attributes) + require.NoError(t, dst.ReadFromV2(&msg)) + assertCreationTime(t, dst, tm) + }) + 
t.Run("json", func(t *testing.T) { + var src, dst container.Container + + dst.SetCreationTime(tm) + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + assertZeroCreationTime(t, dst) + + src.SetCreationTime(tm) + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + assertCreationTime(t, dst, tm) + }) + }) } -func TestWriteDomain(t *testing.T) { - var val container.Container - - require.Zero(t, val.ReadDomain().Name()) - - val = containertest.Container(t) - - const name = "domain name" - - var d container.Domain - d.SetName(name) - - val.WriteDomain(d) - - var msg v2container.Container - val.WriteToV2(&msg) - - assertContainsAttribute(t, msg, v2container.SysAttributeName, name) - assertContainsAttribute(t, msg, v2container.SysAttributeZone, "container") - - const zone = "domain zone" - - d.SetZone(zone) - - val.WriteDomain(d) - - val.WriteToV2(&msg) - - assertContainsAttribute(t, msg, v2container.SysAttributeZone, zone) - - var val2 container.Container - require.NoError(t, val2.ReadFromV2(msg)) - - require.Equal(t, d, val2.ReadDomain()) +func assertHomomorphicHashingEnabled(t testing.TB, c container.Container, attr string) { + require.False(t, c.HomomorphicHashingDisabled()) + require.Equal(t, attr, c.Attribute("__NEOFS__DISABLE_HOMOMORPHIC_HASHING")) + require.Zero(t, collectContainerUserAttributes(c)) + if attr != "" { + require.EqualValues(t, 1, c.NumberOfAttributes()) + require.Equal(t, [][2]string{{"__NEOFS__DISABLE_HOMOMORPHIC_HASHING", attr}}, collectContainerAttributes(c)) + } else { + require.Zero(t, c.NumberOfAttributes()) + require.Zero(t, collectContainerAttributes(c)) + } } -func TestCalculateID(t *testing.T) { - val := containertest.Container(t) - - require.False(t, val.AssertID(cidtest.ID())) - - var id cid.ID - val.CalculateID(&id) - - var msg refs.ContainerID - id.WriteToV2(&msg) - - h := sha256.Sum256(val.Marshal()) - require.Equal(t, 
h[:], msg.GetValue()) - - var id2 cid.ID - require.NoError(t, id2.ReadFromV2(msg)) - - require.True(t, val.AssertID(id2)) +func assertHomomorphicHashingDisabled(t testing.TB, c container.Container) { + require.True(t, c.HomomorphicHashingDisabled()) + require.Equal(t, "true", c.Attribute("__NEOFS__DISABLE_HOMOMORPHIC_HASHING")) + require.EqualValues(t, 1, c.NumberOfAttributes()) + require.Equal(t, [][2]string{{"__NEOFS__DISABLE_HOMOMORPHIC_HASHING", "true"}}, collectContainerAttributes(c)) + require.Zero(t, collectContainerUserAttributes(c)) } -func TestCalculateSignature(t *testing.T) { - val := containertest.Container(t) - - var sig neofscrypto.Signature - - require.Error(t, val.CalculateSignature(&sig, test.RandomSigner(t))) - require.NoError(t, val.CalculateSignature(&sig, test.RandomSignerRFC6979(t))) +func TestContainer_SetHomomorphicHashingDisabled(t *testing.T) { + var c container.Container + + assertHomomorphicHashingEnabled(t, c, "") + + c.SetHomomorphicHashingDisabled(true) + assertHomomorphicHashingDisabled(t, c) + + c.SetHomomorphicHashingDisabled(false) + assertHomomorphicHashingEnabled(t, c, "") + + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst container.Container + + dst.SetHomomorphicHashingDisabled(true) + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + assertHomomorphicHashingEnabled(t, dst, "") + + src.SetHomomorphicHashingDisabled(true) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + assertHomomorphicHashingDisabled(t, dst) + + src.SetAttribute("__NEOFS__DISABLE_HOMOMORPHIC_HASHING", "any") + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + assertHomomorphicHashingEnabled(t, dst, "any") + }) + t.Run("api", func(t *testing.T) { + src := container.New(usertest.ID(), containertest.BasicACL(), netmaptest.PlacementPolicy()) + var dst container.Container + var msg apicontainer.Container + + dst.SetHomomorphicHashingDisabled(true) + src.WriteToV2(&msg) + 
require.Zero(t, msg.Attributes) + require.NoError(t, dst.ReadFromV2(&msg)) + assertHomomorphicHashingEnabled(t, dst, "") + + src.SetHomomorphicHashingDisabled(true) + src.WriteToV2(&msg) + require.Equal(t, []*apicontainer.Container_Attribute{{ + Key: "__NEOFS__DISABLE_HOMOMORPHIC_HASHING", Value: "true", + }}, msg.Attributes) + require.NoError(t, dst.ReadFromV2(&msg)) + assertHomomorphicHashingDisabled(t, dst) + + msg.Attributes[0].Value = "any" + require.NoError(t, dst.ReadFromV2(&msg)) + assertHomomorphicHashingEnabled(t, dst, "any") + }) + t.Run("json", func(t *testing.T) { + var src, dst container.Container + + dst.SetHomomorphicHashingDisabled(true) + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + assertHomomorphicHashingEnabled(t, dst, "") + + src.SetHomomorphicHashingDisabled(true) + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + assertHomomorphicHashingDisabled(t, dst) + + src.SetAttribute("__NEOFS__DISABLE_HOMOMORPHIC_HASHING", "any") + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + assertHomomorphicHashingEnabled(t, dst, "any") + }) + }) +} - var msg refs.Signature - sig.WriteToV2(&msg) +func TestContainer_SetDomain(t *testing.T) { + var c container.Container - var sig2 neofscrypto.Signature - require.NoError(t, sig2.ReadFromV2(msg)) + assertNoDomain := func(t testing.TB, c container.Container) { + require.Zero(t, c.Domain()) + require.Zero(t, c.NumberOfAttributes()) + require.Zero(t, c.Attribute("__NEOFS__NAME")) + require.Zero(t, c.Attribute("__NEOFS__ZONE")) + require.Zero(t, collectContainerAttributes(c)) + require.Zero(t, collectContainerUserAttributes(c)) + } + assertDomain := func(t testing.TB, c container.Container, name, zone string) { + require.Equal(t, name, c.Domain().Name()) + require.Equal(t, name, c.Attribute("__NEOFS__NAME")) + require.Zero(t, 
collectContainerUserAttributes(c)) + if zone != "" && zone != "container" { + require.Equal(t, zone, c.Domain().Zone()) + require.Equal(t, zone, c.Attribute("__NEOFS__ZONE")) + require.EqualValues(t, 2, c.NumberOfAttributes()) + require.ElementsMatch(t, [][2]string{ + {"__NEOFS__NAME", name}, + {"__NEOFS__ZONE", zone}, + }, collectContainerAttributes(c)) + } else { + require.Equal(t, "container", c.Domain().Zone()) + require.Zero(t, c.Attribute("__NEOFS__ZONE")) + require.EqualValues(t, 1, c.NumberOfAttributes()) + require.Equal(t, [][2]string{{"__NEOFS__NAME", name}}, collectContainerAttributes(c)) + } + } - require.True(t, val.VerifySignature(sig2)) + assertNoDomain(t, c) + + var domain container.Domain + domain.SetName("name") + + c.SetDomain(domain) + assertDomain(t, c, "name", "") + + var domainOther container.Domain + domainOther.SetName("name_other") + domainOther.SetZone("zone") + c.SetDomain(domainOther) + assertDomain(t, c, "name_other", "zone") + + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst container.Container + + dst.SetDomain(domain) + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + assertNoDomain(t, dst) + + src.SetDomain(domain) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + assertDomain(t, dst, "name", "") + + src.SetDomain(domainOther) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + assertDomain(t, dst, "name_other", "zone") + }) + t.Run("api", func(t *testing.T) { + src := container.New(usertest.ID(), containertest.BasicACL(), netmaptest.PlacementPolicy()) + var dst container.Container + var msg apicontainer.Container + + dst.SetDomain(domain) + src.WriteToV2(&msg) + require.Zero(t, msg.Attributes) + require.NoError(t, dst.ReadFromV2(&msg)) + assertNoDomain(t, dst) + + src.SetDomain(domain) + src.WriteToV2(&msg) + require.Equal(t, []*apicontainer.Container_Attribute{{ + Key: "__NEOFS__NAME", Value: "name", + }}, msg.Attributes) + require.NoError(t, 
dst.ReadFromV2(&msg)) + assertDomain(t, dst, "name", "") + + src.SetDomain(domainOther) + src.WriteToV2(&msg) + require.ElementsMatch(t, []*apicontainer.Container_Attribute{ + {Key: "__NEOFS__NAME", Value: "name_other"}, + {Key: "__NEOFS__ZONE", Value: "zone"}, + }, msg.Attributes) + require.NoError(t, dst.ReadFromV2(&msg)) + assertDomain(t, dst, "name_other", "zone") + }) + t.Run("json", func(t *testing.T) { + var src, dst container.Container + + dst.SetDomain(domain) + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + assertNoDomain(t, dst) + + src.SetDomain(domain) + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + assertDomain(t, dst, "name", "") + + src.SetDomain(domainOther) + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + assertDomain(t, dst, "name_other", "zone") + }) + }) } diff --git a/container/example_test.go b/container/example_test.go index 0046bb3b6..18aeeb47b 100644 --- a/container/example_test.go +++ b/container/example_test.go @@ -1,9 +1,11 @@ package container_test import ( + "encoding/json" + "fmt" "time" - apiGoContainer "github.com/nspcc-dev/neofs-api-go/v2/container" + apicontainer "github.com/nspcc-dev/neofs-sdk-go/api/container" "github.com/nspcc-dev/neofs-sdk-go/container" "github.com/nspcc-dev/neofs-sdk-go/container/acl" "github.com/nspcc-dev/neofs-sdk-go/netmap" @@ -11,48 +13,47 @@ import ( ) // To create new container in the NeoFS network Container instance should be initialized. 
-func ExampleContainer_Init() { - // import "github.com/nspcc-dev/neofs-sdk-go/container/acl" - // import "github.com/nspcc-dev/neofs-sdk-go/user" - // import "github.com/nspcc-dev/neofs-sdk-go/netmap" - +func ExampleNew() { var account user.ID + var policy netmap.PlacementPolicy + if err := policy.DecodeString("REP 3"); err != nil { + fmt.Printf("failed to init policy: %v\n", err) + return + } - var cnr container.Container - cnr.Init() + cnr := container.New(account, acl.PublicRWExtended, policy) - // required fields - cnr.SetOwner(account) cnr.SetBasicACL(acl.PublicRWExtended) // optional cnr.SetName("awesome container name") cnr.SetCreationTime(time.Now()) - // ... + cnr.SetAttribute("attr_1", "val_1") + cnr.SetAttribute("attr_2", "val_2") - var rd netmap.ReplicaDescriptor - rd.SetNumberOfObjects(1) + var domain container.Domain + domain.SetName("my-cnr1") + cnr.SetDomain(domain) - // placement policy and replicas definition is required - var pp netmap.PlacementPolicy - pp.SetContainerBackupFactor(1) - pp.SetReplicas([]netmap.ReplicaDescriptor{rd}) + j, err := json.MarshalIndent(cnr, "", " ") + if err != nil { + fmt.Printf("failed to encode container: %v\n", err) + return + } - cnr.SetPlacementPolicy(pp) + fmt.Println(string(j)) } // Instances can be also used to process NeoFS API V2 protocol messages with [https://github.com/nspcc-dev/neofs-api] package. func ExampleContainer_marshalling() { - // import apiGoContainer "github.com/nspcc-dev/neofs-api-go/v2/container" - // On the client side. var cnr container.Container - var msg apiGoContainer.Container + var msg apicontainer.Container cnr.WriteToV2(&msg) // *send message* // On the server side. 
- _ = cnr.ReadFromV2(msg) + _ = cnr.ReadFromV2(&msg) } diff --git a/container/id/id.go b/container/id/id.go index fdedaf4b5..9b4fd94c1 100644 --- a/container/id/id.go +++ b/container/id/id.go @@ -5,89 +5,58 @@ import ( "fmt" "github.com/mr-tron/base58" - "github.com/nspcc-dev/neofs-api-go/v2/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" ) // ID represents NeoFS container identifier. // -// ID is mutually compatible with github.com/nspcc-dev/neofs-api-go/v2/refs.ContainerID -// message. See ReadFromV2 / WriteToV2 methods. +// ID implements built-in comparable interface. // -// Instances can be created using built-in var declaration. -// -// Note that direct typecast is not safe and may result in loss of compatibility: +// ID is mutually compatible with [refs.ContainerID] message. See +// [ID.ReadFromV2] / [ID.WriteToV2] methods. // -// _ = ID([32]byte) // not recommended +// Instances can be created using built-in var declaration. type ID [sha256.Size]byte -// ReadFromV2 reads ID from the refs.ContainerID message. -// Returns an error if the message is malformed according -// to the NeoFS API V2 protocol. -// -// See also WriteToV2. -func (id *ID) ReadFromV2(m refs.ContainerID) error { - return id.Decode(m.GetValue()) -} - -// WriteToV2 writes ID to the refs.ContainerID message. -// The message must not be nil. -// -// See also ReadFromV2. -func (id ID) WriteToV2(m *refs.ContainerID) { - m.SetValue(id[:]) -} - -// Encode encodes ID into 32 bytes of dst. Panics if -// dst length is less than 32. -// -// Zero ID is all zeros. -// -// See also Decode. -func (id ID) Encode(dst []byte) { - if l := len(dst); l < sha256.Size { - panic(fmt.Sprintf("destination length is less than %d bytes: %d", sha256.Size, l)) +func (id *ID) decodeBinary(b []byte) error { + if len(b) != sha256.Size { + return fmt.Errorf("invalid value length %d", len(b)) } - - copy(dst, id[:]) + copy(id[:], b) + return nil } -// Decode decodes src bytes into ID. 
-// -// Decode expects that src has 32 bytes length. If the input is malformed, -// Decode returns an error describing format violation. In this case ID -// remains unchanged. +// ReadFromV2 reads ID from the [refs.ContainerID] message. Returns an error if +// the message is malformed according to the NeoFS API V2 protocol. The message +// must not be nil. // -// Decode doesn't mutate src. +// ReadFromV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. // -// See also Encode. -func (id *ID) Decode(src []byte) error { - if len(src) != sha256.Size { - return fmt.Errorf("invalid length %d", len(src)) +// See also [ID.WriteToV2]. +func (id *ID) ReadFromV2(m *refs.ContainerID) error { + if len(m.Value) == 0 { + return fmt.Errorf("missing value field") } - - copy(id[:], src) - - return nil -} - -// SetSHA256 sets container identifier value to SHA256 checksum of container structure. -func (id *ID) SetSHA256(v [sha256.Size]byte) { - copy(id[:], v[:]) + return id.decodeBinary(m.Value) } -// Equals defines a comparison relation between two ID instances. +// WriteToV2 writes ID to the [refs.ContainerID] message of the NeoFS API +// protocol. +// +// WriteToV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. // -// Note that comparison using '==' operator is not recommended since it MAY result -// in loss of compatibility. -func (id ID) Equals(id2 ID) bool { - return id == id2 +// See also [ID.ReadFromV2]. +func (id ID) WriteToV2(m *refs.ContainerID) { + m.Value = id[:] } // EncodeToString encodes ID into NeoFS API protocol string. // // Zero ID is base58 encoding of 32 zeros. // -// See also DecodeString. +// See also [ID.DecodeString]. 
func (id ID) EncodeToString() string { return base58.Encode(id[:]) } @@ -95,29 +64,34 @@ func (id ID) EncodeToString() string { // DecodeString decodes string into ID according to NeoFS API protocol. Returns // an error if s is malformed. // -// See also DecodeString. +// See also [ID.EncodeToString]. func (id *ID) DecodeString(s string) error { - data, err := base58.Decode(s) - if err != nil { - return fmt.Errorf("decode base58: %w", err) + var b []byte + if s != "" { + var err error + b, err = base58.Decode(s) + if err != nil { + return fmt.Errorf("decode base58: %w", err) + } } - - return id.Decode(data) + return id.decodeBinary(b) } -// String implements fmt.Stringer. +// String implements [fmt.Stringer]. // // String is designed to be human-readable, and its format MAY differ between -// SDK versions. String MAY return same result as EncodeToString. String MUST NOT -// be used to encode ID into NeoFS protocol string. +// SDK versions. String MAY return same result as [ID.EncodeToString]. String +// MUST NOT be used to encode ID into NeoFS protocol string. func (id ID) String() string { return id.EncodeToString() } -// FromBinary calculates identifier of the binary-encoded container -// in CAS of the NeoFS containers and writes it into id. -// -// See also [container.Container.CalculateID], [container.Container.AssertID]. -func (id *ID) FromBinary(cnr []byte) { - id.SetSHA256(sha256.Sum256(cnr)) +// IsZero checks whether ID is zero. 
+func (id ID) IsZero() bool { + for i := range id { + if id[i] != 0 { + return false + } + } + return true } diff --git a/container/id/id_test.go b/container/id/id_test.go index e1b07a0cb..528cb02a1 100644 --- a/container/id/id_test.go +++ b/container/id/id_test.go @@ -1,109 +1,113 @@ package cid_test import ( - "crypto/sha256" - "math/rand" "testing" - "github.com/mr-tron/base58" - "github.com/nspcc-dev/neofs-api-go/v2/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" cid "github.com/nspcc-dev/neofs-sdk-go/container/id" cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" "github.com/stretchr/testify/require" ) -func randSHA256Checksum() (cs [sha256.Size]byte) { - //nolint:staticcheck - rand.Read(cs[:]) - return +func TestIDComparable(t *testing.T) { + id1 := cidtest.ID() + require.True(t, id1 == id1) + id2 := cidtest.ChangeID(id1) + require.NotEqual(t, id1, id2) + require.False(t, id1 == id2) } -const emptyID = "11111111111111111111111111111111" - -func TestID_ToV2(t *testing.T) { - t.Run("non-zero", func(t *testing.T) { - checksum := randSHA256Checksum() - - id := cidtest.IDWithChecksum(checksum) - - var idV2 refs.ContainerID - id.WriteToV2(&idV2) - - var newID cid.ID - require.NoError(t, newID.ReadFromV2(idV2)) - - require.Equal(t, id, newID) - require.Equal(t, checksum[:], idV2.GetValue()) +func TestID_ReadFromV2(t *testing.T) { + t.Run("missing fields", func(t *testing.T) { + t.Run("value", func(t *testing.T) { + id := cidtest.ID() + var m refs.ContainerID + + id.WriteToV2(&m) + m.Value = nil + require.ErrorContains(t, id.ReadFromV2(&m), "missing value field") + m.Value = []byte{} + require.ErrorContains(t, id.ReadFromV2(&m), "missing value field") + }) }) - - t.Run("zero", func(t *testing.T) { - var ( - x cid.ID - v2 refs.ContainerID - ) - - x.WriteToV2(&v2) - require.Equal(t, emptyID, base58.Encode(v2.GetValue())) + t.Run("invalid fields", func(t *testing.T) { + t.Run("value", func(t *testing.T) { + id := cidtest.ID() + var m refs.ContainerID + 
+ id.WriteToV2(&m) + m.Value = make([]byte, 31) + require.ErrorContains(t, id.ReadFromV2(&m), "invalid value length 31") + m.Value = make([]byte, 33) + require.ErrorContains(t, id.ReadFromV2(&m), "invalid value length 33") + }) }) } -func TestID_Equal(t *testing.T) { - cs := randSHA256Checksum() - - id1 := cidtest.IDWithChecksum(cs) - id2 := cidtest.IDWithChecksum(cs) - - require.True(t, id1.Equals(id2)) - - id3 := cidtest.ID() - - require.False(t, id1.Equals(id3)) -} - -func TestID_String(t *testing.T) { - t.Run("DecodeString/EncodeToString", func(t *testing.T) { - id := cidtest.ID() - var id2 cid.ID - - require.NoError(t, id2.DecodeString(id.EncodeToString())) - require.Equal(t, id, id2) - }) +func TestID_DecodeString(t *testing.T) { + var id cid.ID - t.Run("zero", func(t *testing.T) { + const zeroIDString = "11111111111111111111111111111111" + require.Equal(t, zeroIDString, id.EncodeToString()) + id = cidtest.ChangeID(id) + require.NoError(t, id.DecodeString(zeroIDString)) + require.Equal(t, zeroIDString, id.EncodeToString()) + require.Zero(t, id) + + var bin = [32]byte{231, 129, 236, 104, 74, 71, 155, 100, 72, 209, 186, 80, 2, 184, 9, 161, 10, 76, 18, 203, 126, 94, 101, 42, 157, 211, 66, 99, 247, 143, 226, 23} + const str = "Gai5pjZVewwmscQ5UczQbj2W8Wkh9d1BGUoRNzjR6QCN" + require.NoError(t, id.DecodeString(str)) + require.Equal(t, str, id.EncodeToString()) + require.EqualValues(t, bin, id) + + var binOther = [32]byte{216, 146, 23, 99, 156, 90, 232, 244, 202, 213, 0, 92, 22, 194, 164, 150, 233, 163, 175, 199, 187, 45, 65, 7, 190, 124, 77, 99, 8, 172, 36, 112} + const strOther = "FaQGU3PHuHjhHbce1u8AuHuabx4Ra9CxREsMcZffXwM1" + require.NoError(t, id.DecodeString(strOther)) + require.Equal(t, strOther, id.EncodeToString()) + require.EqualValues(t, binOther, id) + + t.Run("invalid", func(t *testing.T) { var id cid.ID - - require.Equal(t, emptyID, id.EncodeToString()) + for _, testCase := range []struct{ input, err string }{ + {input: "not_a_base58_string", err: 
"decode base58"}, + {input: "", err: "invalid value length 0"}, + {input: "qxAE9SLuDq7dARPAFaWG6vbuGoocwoTn19LK5YVqnS", err: "invalid value length 31"}, + {input: "HJJEkEKthnvMw7NsZNgzBEQ4tf9AffmaBYWxfBULvvbPW", err: "invalid value length 33"}, + } { + require.ErrorContains(t, id.DecodeString(testCase.input), testCase.err, testCase) + } }) -} - -func TestNewFromV2(t *testing.T) { - t.Run("from zero", func(t *testing.T) { - var ( - x cid.ID - v2 refs.ContainerID - ) - - require.Error(t, x.ReadFromV2(v2)) + t.Run("encoding", func(t *testing.T) { + t.Run("api", func(t *testing.T) { + var src, dst cid.ID + var msg refs.ContainerID + + require.NoError(t, dst.DecodeString(str)) + + src.WriteToV2(&msg) + require.Equal(t, make([]byte, 32), msg.Value) + require.NoError(t, dst.ReadFromV2(&msg)) + require.Zero(t, dst) + require.Equal(t, zeroIDString, dst.EncodeToString()) + + require.NoError(t, src.DecodeString(str)) + + src.WriteToV2(&msg) + require.Equal(t, bin[:], msg.Value) + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + require.EqualValues(t, bin, dst) + require.Equal(t, str, dst.EncodeToString()) + }) }) } -func TestID_Encode(t *testing.T) { +func TestID_IsZero(t *testing.T) { var id cid.ID - - t.Run("panic", func(t *testing.T) { - dst := make([]byte, sha256.Size-1) - - require.Panics(t, func() { - id.Encode(dst) - }) - }) - - t.Run("correct", func(t *testing.T) { - dst := make([]byte, sha256.Size) - - require.NotPanics(t, func() { - id.Encode(dst) - }) - require.Equal(t, emptyID, id.EncodeToString()) - }) + require.True(t, id.IsZero()) + for i := range id { + id2 := id + id2[i]++ + require.False(t, id2.IsZero()) + } } diff --git a/container/id/test/id.go b/container/id/test/id.go index 0fa5197fc..b2636b3ab 100644 --- a/container/id/test/id.go +++ b/container/id/test/id.go @@ -1,7 +1,6 @@ package cidtest import ( - "crypto/sha256" "math/rand" cid "github.com/nspcc-dev/neofs-sdk-go/container/id" @@ -9,18 +8,23 @@ import ( // ID returns random cid.ID. 
func ID() cid.ID { - checksum := [sha256.Size]byte{} + var res cid.ID //nolint:staticcheck - rand.Read(checksum[:]) - - return IDWithChecksum(checksum) + rand.Read(res[:]) + return res } -// IDWithChecksum returns cid.ID initialized -// with specified checksum. -func IDWithChecksum(cs [sha256.Size]byte) cid.ID { - var id cid.ID - id.SetSHA256(cs) +// NIDs returns n random cid.ID instances. +func NIDs(n int) []cid.ID { + res := make([]cid.ID, n) + for i := range res { + res[i] = ID() + } + return res +} +// ChangeID returns container ID other than the given one. +func ChangeID(id cid.ID) cid.ID { + id[0]++ return id } diff --git a/container/id/test/id_test.go b/container/id/test/id_test.go new file mode 100644 index 000000000..ae9b843eb --- /dev/null +++ b/container/id/test/id_test.go @@ -0,0 +1,31 @@ +package cidtest_test + +import ( + "math/rand" + "testing" + + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + cid "github.com/nspcc-dev/neofs-sdk-go/container/id" + cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" + "github.com/stretchr/testify/require" +) + +func TestID(t *testing.T) { + id := cidtest.ID() + require.NotEqual(t, id, cidtest.ID()) + + var m refs.ContainerID + id.WriteToV2(&m) + var id2 cid.ID + require.NoError(t, id2.ReadFromV2(&m)) +} + +func TestChangeID(t *testing.T) { + id := cidtest.ID() + require.NotEqual(t, id, cidtest.ChangeID(id)) +} + +func TestNIDs(t *testing.T) { + n := rand.Int() % 10 + require.Len(t, cidtest.NIDs(n), n) +} diff --git a/container/network.go b/container/network.go index fa5b8cb87..eefa50f36 100644 --- a/container/network.go +++ b/container/network.go @@ -7,14 +7,16 @@ import ( // ApplyNetworkConfig applies network configuration to the // container. Changes the container if it does not satisfy // network configuration. +// +// See also [Container.AssertNetworkConfig]. 
func (x *Container) ApplyNetworkConfig(cfg netmap.NetworkInfo) { - if cfg.HomomorphicHashingDisabled() { - x.DisableHomomorphicHashing() - } + x.SetHomomorphicHashingDisabled(cfg.HomomorphicHashingDisabled()) } // AssertNetworkConfig checks if a container matches passed // network configuration. +// +// See also [Container.ApplyNetworkConfig]. func (x Container) AssertNetworkConfig(cfg netmap.NetworkInfo) bool { - return x.IsHomomorphicHashingDisabled() == cfg.HomomorphicHashingDisabled() + return x.HomomorphicHashingDisabled() == cfg.HomomorphicHashingDisabled() } diff --git a/container/network_test.go b/container/network_test.go index d80f1d3cd..3fd47546d 100644 --- a/container/network_test.go +++ b/container/network_test.go @@ -3,30 +3,33 @@ package container_test import ( "testing" - containertest "github.com/nspcc-dev/neofs-sdk-go/container/test" - netmaptest "github.com/nspcc-dev/neofs-sdk-go/netmap/test" + "github.com/nspcc-dev/neofs-sdk-go/container" + "github.com/nspcc-dev/neofs-sdk-go/netmap" "github.com/stretchr/testify/require" ) -func TestContainer_NetworkConfig(t *testing.T) { - c := containertest.Container(t) - nc := netmaptest.NetworkInfo() - - t.Run("default", func(t *testing.T) { - require.False(t, c.IsHomomorphicHashingDisabled()) - - res := c.AssertNetworkConfig(nc) - - require.True(t, res) - }) - - nc.DisableHomomorphicHashing() - - t.Run("apply", func(t *testing.T) { - require.False(t, c.IsHomomorphicHashingDisabled()) - - c.ApplyNetworkConfig(nc) - - require.True(t, c.IsHomomorphicHashingDisabled()) - }) +func TestContainer_ApplyNetworkConfig(t *testing.T) { + var c container.Container + var n netmap.NetworkInfo + + require.True(t, c.AssertNetworkConfig(n)) + + n.SetHomomorphicHashingDisabled(true) + require.False(t, c.AssertNetworkConfig(n)) + + for _, testCase := range []struct { + cnr, net bool + }{ + {cnr: false, net: false}, + {cnr: false, net: true}, + {cnr: true, net: false}, + {cnr: true, net: true}, + } { + 
c.SetHomomorphicHashingDisabled(testCase.cnr) + n.SetHomomorphicHashingDisabled(testCase.net) + require.Equal(t, testCase.cnr == testCase.net, c.AssertNetworkConfig(n), testCase) + + c.ApplyNetworkConfig(n) + require.True(t, c.AssertNetworkConfig(n)) + } } diff --git a/container/size.go b/container/size.go index 2402a5f95..9d67de922 100644 --- a/container/size.go +++ b/container/size.go @@ -4,101 +4,102 @@ import ( "errors" "fmt" - "github.com/nspcc-dev/neofs-api-go/v2/container" - "github.com/nspcc-dev/neofs-api-go/v2/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/container" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" cid "github.com/nspcc-dev/neofs-sdk-go/container/id" ) // SizeEstimation groups information about estimation of the size of the data // stored in the NeoFS container. // -// SizeEstimation is mutually compatible with github.com/nspcc-dev/neofs-api-go/v2/container.UsedSpaceAnnouncement -// message. See ReadFromV2 / WriteToV2 methods. +// SizeEstimation is mutually compatible with +// [container.AnnounceUsedSpaceRequest_Body_Announcement] message. See +// [SizeEstimation.ReadFromV2] / [SizeEstimation.WriteToV2] methods. type SizeEstimation struct { - m container.UsedSpaceAnnouncement + epoch uint64 + val uint64 + cnr cid.ID } -// ReadFromV2 reads SizeEstimation from the container.UsedSpaceAnnouncement message. -// Checks if the message conforms to NeoFS API V2 protocol. +// ReadFromV2 reads SizeEstimation from the +// [container.AnnounceUsedSpaceRequest_Body_Announcement] message. Returns an +// error if the message is malformed according to the NeoFS API V2 protocol. The +// message must not be nil. // -// See also WriteToV2. -func (x *SizeEstimation) ReadFromV2(m container.UsedSpaceAnnouncement) error { - cnrV2 := m.GetContainerID() - if cnrV2 == nil { +// ReadFromV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. +// +// See also [SizeEstimation.WriteToV2]. 
+func (x *SizeEstimation) ReadFromV2(m *container.AnnounceUsedSpaceRequest_Body_Announcement) error {
+	if m.ContainerId == nil {
 		return errors.New("missing container")
 	}
-
-	var cnr cid.ID
-
-	err := cnr.ReadFromV2(*cnrV2)
+	err := x.cnr.ReadFromV2(m.ContainerId)
 	if err != nil {
 		return fmt.Errorf("invalid container: %w", err)
 	}
 
-	x.m = m
+	x.epoch = m.Epoch
+	x.val = m.UsedSpace
 
 	return nil
 }
 
-// WriteToV2 writes SizeEstimation into the container.UsedSpaceAnnouncement message.
-// The message MUST NOT be nil.
+// WriteToV2 writes SizeEstimation to the
+// [container.AnnounceUsedSpaceRequest_Body_Announcement] message of the NeoFS
+// API protocol.
+//
+// WriteToV2 is intended to be used by the NeoFS API V2 client/server
+// implementation only and is not expected to be directly used by applications.
 //
-// See also ReadFromV2.
-func (x SizeEstimation) WriteToV2(m *container.UsedSpaceAnnouncement) {
-	*m = x.m
+// See also [SizeEstimation.ReadFromV2].
+func (x SizeEstimation) WriteToV2(m *container.AnnounceUsedSpaceRequest_Body_Announcement) {
+	m.ContainerId = new(refs.ContainerID)
+	x.cnr.WriteToV2(m.ContainerId)
+	m.Epoch = x.epoch
+	m.UsedSpace = x.val
 }
 
 // SetEpoch sets epoch when estimation of the container data size was calculated.
 //
-// See also Epoch.
+// See also [SizeEstimation.Epoch].
 func (x *SizeEstimation) SetEpoch(epoch uint64) {
-	x.m.SetEpoch(epoch)
+	x.epoch = epoch
 }
 
-// Epoch return epoch set using SetEpoch.
+// Epoch returns epoch set using [SizeEstimation.SetEpoch].
 //
 // Zero SizeEstimation represents estimation in zero epoch.
 func (x SizeEstimation) Epoch() uint64 {
-	return x.m.GetEpoch()
+	return x.epoch
 }
 
-// SetContainer specifies the container for which the amount of data is estimated.
-// Required by the NeoFS API protocol.
+// SetContainer specifies the container for which the amount of data is
+// estimated. Required by the NeoFS API protocol.
 //
-// See also Container.
func (x *SizeEstimation) SetContainer(cnr cid.ID) { - var cidV2 refs.ContainerID - cnr.WriteToV2(&cidV2) - - x.m.SetContainerID(&cidV2) + x.cnr = cnr } -// Container returns container set using SetContainer. +// Container returns container set using [SizeEstimation.SetContainer]. // // Zero SizeEstimation is not bound to any container (returns zero) which is // incorrect according to NeoFS API protocol. -func (x SizeEstimation) Container() (res cid.ID) { - m := x.m.GetContainerID() - if m != nil { - err := res.ReadFromV2(*m) - if err != nil { - panic(fmt.Errorf("unexpected error from cid.ID.ReadFromV2: %w", err)) - } - } - - return +func (x SizeEstimation) Container() cid.ID { + return x.cnr } // SetValue sets estimated amount of data (in bytes) in the specified container. // -// See also Value. +// See also [SizeEstimation.Value]. func (x *SizeEstimation) SetValue(value uint64) { - x.m.SetUsedSpace(value) + x.val = value } -// Value returns data size estimation set using SetValue. +// Value returns data size estimation set using [SizeEstimation.SetValue]. // // Zero SizeEstimation has zero value. 
func (x SizeEstimation) Value() uint64 { - return x.m.GetUsedSpace() + return x.val } diff --git a/container/size_test.go b/container/size_test.go index 97cf49dc7..6243c9c7d 100644 --- a/container/size_test.go +++ b/container/size_test.go @@ -1,94 +1,130 @@ package container_test import ( - "crypto/sha256" "testing" - v2container "github.com/nspcc-dev/neofs-api-go/v2/container" - "github.com/nspcc-dev/neofs-api-go/v2/refs" + apicontainer "github.com/nspcc-dev/neofs-sdk-go/api/container" "github.com/nspcc-dev/neofs-sdk-go/container" - cid "github.com/nspcc-dev/neofs-sdk-go/container/id" cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" + containertest "github.com/nspcc-dev/neofs-sdk-go/container/test" "github.com/stretchr/testify/require" ) -func TestSizeEstimation_Epoch(t *testing.T) { - var val container.SizeEstimation - - require.Zero(t, val.Epoch()) - - const epoch = 123 - - val.SetEpoch(epoch) - require.EqualValues(t, epoch, val.Epoch()) - - var msg v2container.UsedSpaceAnnouncement - val.WriteToV2(&msg) - - require.EqualValues(t, epoch, msg.GetEpoch()) +func TestSizeEstimation_ReadFromV2(t *testing.T) { + t.Run("missing fields", func(t *testing.T) { + t.Run("container", func(t *testing.T) { + s := containertest.SizeEstimation() + var m apicontainer.AnnounceUsedSpaceRequest_Body_Announcement + + s.WriteToV2(&m) + m.ContainerId = nil + require.ErrorContains(t, s.ReadFromV2(&m), "missing container") + }) + }) + t.Run("invalid fields", func(t *testing.T) { + t.Run("container", func(t *testing.T) { + t.Run("container", func(t *testing.T) { + s := containertest.SizeEstimation() + var m apicontainer.AnnounceUsedSpaceRequest_Body_Announcement + + s.WriteToV2(&m) + m.ContainerId.Value = nil + require.ErrorContains(t, s.ReadFromV2(&m), "invalid container: missing value field") + m.ContainerId.Value = []byte{} + require.ErrorContains(t, s.ReadFromV2(&m), "invalid container: missing value field") + m.ContainerId.Value = make([]byte, 31) + 
require.ErrorContains(t, s.ReadFromV2(&m), "invalid container: invalid value length 31") + m.ContainerId.Value = make([]byte, 33) + require.ErrorContains(t, s.ReadFromV2(&m), "invalid container: invalid value length 33") + }) + }) + }) } -func TestSizeEstimation_Container(t *testing.T) { - var val container.SizeEstimation - - require.Zero(t, val.Container()) - - cnr := cidtest.ID() - - val.SetContainer(cnr) - require.True(t, val.Container().Equals(cnr)) +func testSizeEstimationNumField(t *testing.T, get func(container.SizeEstimation) uint64, set func(*container.SizeEstimation, uint64), + getAPI func(*apicontainer.AnnounceUsedSpaceRequest_Body_Announcement) uint64) { + var s container.SizeEstimation - var msg v2container.UsedSpaceAnnouncement - val.WriteToV2(&msg) + require.Zero(t, get(s)) - var msgCnr refs.ContainerID - cnr.WriteToV2(&msgCnr) + const val = 13 + set(&s, val) + require.EqualValues(t, val, get(s)) - require.Equal(t, &msgCnr, msg.GetContainerID()) -} + const valOther = 42 + set(&s, valOther) + require.EqualValues(t, valOther, get(s)) -func TestSizeEstimation_Value(t *testing.T) { - var val container.SizeEstimation + t.Run("encoding", func(t *testing.T) { + t.Run("api", func(t *testing.T) { + var src, dst container.SizeEstimation + var msg apicontainer.AnnounceUsedSpaceRequest_Body_Announcement - require.Zero(t, val.Value()) + // set required data just to satisfy decoder + src.SetContainer(cidtest.ID()) - const value = 876 + set(&dst, val) - val.SetValue(value) - require.EqualValues(t, value, val.Value()) + src.WriteToV2(&msg) + require.Zero(t, getAPI(&msg)) + require.NoError(t, dst.ReadFromV2(&msg)) + require.Zero(t, get(dst)) - var msg v2container.UsedSpaceAnnouncement - val.WriteToV2(&msg) + set(&src, val) - require.EqualValues(t, value, msg.GetUsedSpace()) + src.WriteToV2(&msg) + require.EqualValues(t, val, getAPI(&msg)) + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + require.EqualValues(t, val, get(dst)) + }) + }) } -func 
TestSizeEstimation_ReadFromV2(t *testing.T) { - const epoch = 654 - const value = 903 - var cnrMsg refs.ContainerID - - var msg v2container.UsedSpaceAnnouncement - - var val container.SizeEstimation - - require.Error(t, val.ReadFromV2(msg)) - - msg.SetContainerID(&cnrMsg) - - require.Error(t, val.ReadFromV2(msg)) +func TestSizeEstimation_SetEpoch(t *testing.T) { + testSizeEstimationNumField(t, container.SizeEstimation.Epoch, (*container.SizeEstimation).SetEpoch, + (*apicontainer.AnnounceUsedSpaceRequest_Body_Announcement).GetEpoch) +} - cnrMsg.SetValue(make([]byte, sha256.Size)) +func TestSizeEstimation_SetValue(t *testing.T) { + testSizeEstimationNumField(t, container.SizeEstimation.Value, (*container.SizeEstimation).SetValue, + (*apicontainer.AnnounceUsedSpaceRequest_Body_Announcement).GetUsedSpace) +} - var cnr cid.ID - require.NoError(t, cnr.ReadFromV2(cnrMsg)) +func TestSizeEstimation_SetContainer(t *testing.T) { + var s container.SizeEstimation - msg.SetEpoch(epoch) - msg.SetUsedSpace(value) + require.Zero(t, s.Container()) - require.NoError(t, val.ReadFromV2(msg)) + cnr := cidtest.ID() - require.EqualValues(t, epoch, val.Epoch()) - require.EqualValues(t, value, val.Value()) - require.EqualValues(t, cnr, val.Container()) + s.SetContainer(cnr) + require.Equal(t, cnr, s.Container()) + + cnrOther := cidtest.ChangeID(cnr) + s.SetContainer(cnrOther) + require.Equal(t, cnrOther, s.Container()) + + t.Run("encoding", func(t *testing.T) { + t.Run("api", func(t *testing.T) { + var src, dst container.SizeEstimation + var msg apicontainer.AnnounceUsedSpaceRequest_Body_Announcement + + dst.SetContainer(cnr) + + src.WriteToV2(&msg) + require.Equal(t, make([]byte, 32), msg.ContainerId.Value) + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + require.Zero(t, dst.Container()) + + dst.SetContainer(cnrOther) + src.SetContainer(cnr) + src.WriteToV2(&msg) + require.Equal(t, cnr[:], msg.ContainerId.Value) + err = dst.ReadFromV2(&msg) + require.NoError(t, err) + 
require.Equal(t, cnr, dst.Container()) + }) + }) } diff --git a/container/test/generate.go b/container/test/generate.go index 42a7269a0..17c3b9376 100644 --- a/container/test/generate.go +++ b/container/test/generate.go @@ -2,7 +2,8 @@ package containertest import ( "math/rand" - "testing" + "strconv" + "time" "github.com/nspcc-dev/neofs-sdk-go/container" "github.com/nspcc-dev/neofs-sdk-go/container/acl" @@ -12,20 +13,28 @@ import ( ) // Container returns random container.Container. -func Container(t testing.TB) (x container.Container) { - owner := usertest.ID(t) - - x.Init() - x.SetAttribute("some attribute", "value") - x.SetOwner(owner) - x.SetBasicACL(BasicACL()) - x.SetPlacementPolicy(netmaptest.PlacementPolicy()) +func Container() container.Container { + x := container.New(usertest.ID(), BasicACL(), netmaptest.PlacementPolicy()) + x.SetName("name_" + strconv.Itoa(rand.Int())) + x.SetCreationTime(time.Now()) + x.SetHomomorphicHashingDisabled(rand.Int()%2 == 0) + var d container.Domain + d.SetName("domain_" + strconv.Itoa(rand.Int())) + d.SetZone("zone_" + strconv.Itoa(rand.Int())) + x.SetDomain(d) + + nAttr := rand.Int() % 4 + for i := 0; i < nAttr; i++ { + si := strconv.Itoa(rand.Int()) + x.SetAttribute("key_"+si, "val_"+si) + } return x } // SizeEstimation returns random container.SizeEstimation. -func SizeEstimation() (x container.SizeEstimation) { +func SizeEstimation() container.SizeEstimation { + var x container.SizeEstimation x.SetContainer(cidtest.ID()) x.SetEpoch(rand.Uint64()) x.SetValue(rand.Uint64()) @@ -34,7 +43,8 @@ func SizeEstimation() (x container.SizeEstimation) { } // BasicACL returns random acl.Basic. 
-func BasicACL() (x acl.Basic) { +func BasicACL() acl.Basic { + var x acl.Basic x.FromBits(rand.Uint32()) - return + return x } diff --git a/container/test/generate_test.go b/container/test/generate_test.go new file mode 100644 index 000000000..99ac5eb26 --- /dev/null +++ b/container/test/generate_test.go @@ -0,0 +1,46 @@ +package containertest_test + +import ( + "testing" + + apicontainer "github.com/nspcc-dev/neofs-sdk-go/api/container" + "github.com/nspcc-dev/neofs-sdk-go/container" + containertest "github.com/nspcc-dev/neofs-sdk-go/container/test" + "github.com/stretchr/testify/require" +) + +func TestContainer(t *testing.T) { + v := containertest.Container() + require.NotEqual(t, v, containertest.Container()) + + var v2 container.Container + require.NoError(t, v2.Unmarshal(v.Marshal())) + require.Equal(t, v, v2) + + var m apicontainer.Container + v.WriteToV2(&m) + var v3 container.Container + require.NoError(t, v3.ReadFromV2(&m)) + require.Equal(t, v, v3) + + j, err := v.MarshalJSON() + require.NoError(t, err) + var v4 container.Container + require.NoError(t, v4.UnmarshalJSON(j)) + require.Equal(t, v, v4) +} + +func TestBasicACL(t *testing.T) { + require.NotEqual(t, containertest.BasicACL(), containertest.BasicACL()) +} + +func TestSizeEstimation(t *testing.T) { + v := containertest.SizeEstimation() + require.NotEqual(t, v, containertest.SizeEstimation()) + + var m apicontainer.AnnounceUsedSpaceRequest_Body_Announcement + v.WriteToV2(&m) + var v2 container.SizeEstimation + require.NoError(t, v2.ReadFromV2(&m)) + require.Equal(t, v, v2) +} diff --git a/crypto/api.go b/crypto/api.go new file mode 100644 index 000000000..7fc3265e7 --- /dev/null +++ b/crypto/api.go @@ -0,0 +1,304 @@ +package neofscrypto + +import ( + "errors" + "fmt" + + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/session" + "github.com/nspcc-dev/neofs-sdk-go/internal/proto" +) + +// Request is a common interface of NeoFS API requests. 
+type Request interface { + // GetMetaHeader returns meta header attached to the [Request]. + GetMetaHeader() *session.RequestMetaHeader + // GetVerifyHeader returns verification header of the [Request]. + GetVerifyHeader() *session.RequestVerificationHeader +} + +// Response is a common interface of NeoFS API responses. +type Response interface { + // GetMetaHeader returns meta header attached to the [Response]. + GetMetaHeader() *session.ResponseMetaHeader + // GetVerifyHeader returns verification header of the [Response]. + GetVerifyHeader() *session.ResponseVerificationHeader +} + +// SignRequest signs all verified parts of the request using provided +// [neofscrypto.Signer] and returns resulting verification header. Meta header +// must be set in advance if needed. +// +// Optional buffer is used for encoding if it has sufficient size. +func SignRequest(signer Signer, req Request, body proto.Message, buf []byte) (*session.RequestVerificationHeader, error) { + var bodySig []byte + var err error + originVerifyHdr := req.GetVerifyHeader() + if originVerifyHdr == nil { + // sign session message body + b := encodeMessage(body, buf) + bodySig, err = signer.Sign(b) + if err != nil { + return nil, fmt.Errorf("sign body: %w", err) + } + if len(b) > len(buf) { + buf = b + } + } + + // sign meta header + b := encodeMessage(req.GetMetaHeader(), buf) + metaSig, err := signer.Sign(b) + if err != nil { + return nil, fmt.Errorf("sign meta header: %w", err) + } + if len(b) > len(buf) { + buf = b + } + + // sign verification header origin + b = encodeMessage(originVerifyHdr, buf) + verifyOriginSig, err := signer.Sign(b) + if err != nil { + return nil, fmt.Errorf("sign origin of verification header: %w", err) + } + + scheme := refs.SignatureScheme(signer.Scheme()) + pubKey := PublicKeyBytes(signer.Public()) + res := &session.RequestVerificationHeader{ + MetaSignature: &refs.Signature{Key: pubKey, Sign: metaSig, Scheme: scheme}, + OriginSignature: &refs.Signature{Key: pubKey, 
Sign: verifyOriginSig, Scheme: scheme}, + Origin: originVerifyHdr, + } + if originVerifyHdr == nil { + res.BodySignature = &refs.Signature{Key: pubKey, Sign: bodySig, Scheme: scheme} + } + return res, nil +} + +// VerifyRequest verifies all signatures of given request and its extracted +// body. +func VerifyRequest(req Request, body proto.Message) error { + verifyHdr := req.GetVerifyHeader() + if verifyHdr == nil { + return errors.New("missing verification header") + } + + // pre-calculate max encoded size to allocate single buffer + maxSz := body.MarshaledSize() + metaHdr := req.GetMetaHeader() + for { + if metaHdr.GetOrigin() == nil != (verifyHdr.Origin == nil) { // metaHdr can be nil, verifyHdr cannot + return errors.New("different number of meta and verification headers") + } + + sz := verifyHdr.MarshaledSize() + if sz > maxSz { + maxSz = sz + } + sz = metaHdr.MarshaledSize() + if sz > maxSz { + maxSz = sz + } + + if verifyHdr.Origin == nil { + break + } + verifyHdr = verifyHdr.Origin + metaHdr = metaHdr.Origin + } + + var err error + var bodySig *refs.Signature + metaHdr = req.GetMetaHeader() + verifyHdr = req.GetVerifyHeader() + buf := make([]byte, maxSz) + for { + if verifyHdr.MetaSignature == nil { + return errors.New("missing signature of the meta header") + } + if err = verifyMessageSignature(metaHdr, verifyHdr.MetaSignature, buf); err != nil { + return fmt.Errorf("verify signature of the meta header: %w", err) + } + if verifyHdr.OriginSignature == nil { + return errors.New("missing signature of the origin verification header") + } + if err = verifyMessageSignature(verifyHdr.Origin, verifyHdr.OriginSignature, buf); err != nil { + return fmt.Errorf("verify signature of the origin verification header: %w", err) + } + + if verifyHdr.Origin == nil { + bodySig = verifyHdr.BodySignature + break + } + + if verifyHdr.BodySignature != nil { + return errors.New("body signature is set for non-origin level") + } + + verifyHdr = verifyHdr.Origin + metaHdr = 
metaHdr.Origin + } + + if bodySig == nil { + return errors.New("missing body signature") + } + if err = verifyMessageSignature(body, bodySig, buf); err != nil { + return fmt.Errorf("verify body signature: %w", err) + } + return nil +} + +// SignResponse signs all verified parts of the response using provided +// [neofscrypto.Signer] and returns resulting verification header. Meta header +// must be set in advance if needed. +// +// Optional buffer is used for encoding if it has sufficient size. +func SignResponse(signer Signer, resp Response, body proto.Message, buf []byte) (*session.ResponseVerificationHeader, error) { + var bodySig []byte + var err error + originVerifyHdr := resp.GetVerifyHeader() + if originVerifyHdr == nil { + // sign session message body + b := encodeMessage(body, buf) + bodySig, err = signer.Sign(b) + if err != nil { + return nil, fmt.Errorf("sign body: %w", err) + } + if len(b) > len(buf) { + buf = b + } + } + + // sign meta header + b := encodeMessage(resp.GetMetaHeader(), buf) + metaSig, err := signer.Sign(b) + if err != nil { + return nil, fmt.Errorf("sign meta header: %w", err) + } + if len(b) > len(buf) { + buf = b + } + + // sign verification header origin + b = encodeMessage(originVerifyHdr, buf) + verifyOriginSig, err := signer.Sign(b) + if err != nil { + return nil, fmt.Errorf("sign origin of verification header: %w", err) + } + + scheme := refs.SignatureScheme(signer.Scheme()) + pubKey := PublicKeyBytes(signer.Public()) + res := &session.ResponseVerificationHeader{ + MetaSignature: &refs.Signature{Key: pubKey, Sign: metaSig, Scheme: scheme}, + OriginSignature: &refs.Signature{Key: pubKey, Sign: verifyOriginSig, Scheme: scheme}, + Origin: originVerifyHdr, + } + if originVerifyHdr == nil { + res.BodySignature = &refs.Signature{Key: pubKey, Sign: bodySig, Scheme: scheme} + } + return res, nil +} + +// VerifyResponse verifies all signatures of given response and its extracted +// body. 
+func VerifyResponse(resp Response, body proto.Message) error { + verifyHdr := resp.GetVerifyHeader() + if verifyHdr == nil { + return errors.New("missing verification header") + } + + // pre-calculate max encoded size to allocate single buffer + maxSz := body.MarshaledSize() + metaHdr := resp.GetMetaHeader() + for { + if metaHdr.GetOrigin() == nil != (verifyHdr.Origin == nil) { // metaHdr can be nil, verifyHdr cannot + return errors.New("different number of meta and verification headers") + } + + sz := verifyHdr.MarshaledSize() + if sz > maxSz { + maxSz = sz + } + sz = metaHdr.MarshaledSize() + if sz > maxSz { + maxSz = sz + } + + if verifyHdr.Origin == nil { + break + } + verifyHdr = verifyHdr.Origin + metaHdr = metaHdr.Origin + } + + var err error + var bodySig *refs.Signature + metaHdr = resp.GetMetaHeader() + verifyHdr = resp.GetVerifyHeader() + buf := make([]byte, maxSz) + for { + if verifyHdr.MetaSignature == nil { + return errors.New("missing signature of the meta header") + } + if err = verifyMessageSignature(metaHdr, verifyHdr.MetaSignature, buf); err != nil { + return fmt.Errorf("verify signature of the meta header: %w", err) + } + if verifyHdr.OriginSignature == nil { + return errors.New("missing signature of the origin verification header") + } + if err = verifyMessageSignature(verifyHdr.Origin, verifyHdr.OriginSignature, buf); err != nil { + return fmt.Errorf("verify signature of the origin verification header: %w", err) + } + + if verifyHdr.Origin == nil { + bodySig = verifyHdr.BodySignature + break + } + + if verifyHdr.BodySignature != nil { + return errors.New("body signature is set for non-origin level") + } + + verifyHdr = verifyHdr.Origin + metaHdr = metaHdr.Origin + } + + if bodySig == nil { + return errors.New("missing body signature") + } + if err = verifyMessageSignature(body, bodySig, buf); err != nil { + return fmt.Errorf("verify body signature: %w", err) + } + return nil +} + +func verifyMessageSignature(msg proto.Message, sig 
*refs.Signature, buf []byte) error { + if len(sig.Key) == 0 { + return errors.New("missing public key") + } else if sig.Scheme < 0 { + return fmt.Errorf("invalid scheme %d", sig.Scheme) + } + + pubKey, err := decodePublicKey(Scheme(sig.Scheme), sig.Key) + if err != nil { + return err + } + + if !pubKey.Verify(encodeMessage(msg, buf), sig.Sign) { + return errors.New("signature mismatch") + } + return nil +} + +func encodeMessage(m proto.Message, buf []byte) []byte { + sz := m.MarshaledSize() + var b []byte + if len(buf) >= sz { + b = buf[:sz] + } else { + b = make([]byte, sz) + } + m.MarshalStable(b) + return b +} diff --git a/crypto/api_test.go b/crypto/api_test.go new file mode 100644 index 000000000..17946110c --- /dev/null +++ b/crypto/api_test.go @@ -0,0 +1,325 @@ +package neofscrypto_test + +import ( + "encoding/hex" + "os" + "path/filepath" + "strconv" + "testing" + + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/nspcc-dev/neofs-sdk-go/api/accounting" + "github.com/nspcc-dev/neofs-sdk-go/api/container" + "github.com/nspcc-dev/neofs-sdk-go/api/netmap" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/session" + "github.com/nspcc-dev/neofs-sdk-go/api/status" + neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" + neofsecdsa "github.com/nspcc-dev/neofs-sdk-go/crypto/ecdsa" + internalproto "github.com/nspcc-dev/neofs-sdk-go/internal/proto" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" +) + +type request interface { + proto.Message + neofscrypto.Request +} + +type response interface { + proto.Message + neofscrypto.Response +} + +func verifyRequestFromFile(t testing.TB, file string, req request, getBody func(request) internalproto.Message) { + b, err := os.ReadFile(filepath.Join(testDataDir, file)) + require.NoError(t, err) + b, err = hex.DecodeString(string(b)) + require.NoError(t, err) + require.NoError(t, proto.Unmarshal(b, req)) + require.NoError(t, 
neofscrypto.VerifyRequest(req, getBody(req))) +} + +func verifyResponseFromFile(t testing.TB, file string, resp response, getBody func(response) internalproto.Message) { + b, err := os.ReadFile(filepath.Join(testDataDir, file)) + require.NoError(t, err) + b, err = hex.DecodeString(string(b)) + require.NoError(t, err) + require.NoError(t, proto.Unmarshal(b, resp)) + require.NoError(t, neofscrypto.VerifyResponse(resp, getBody(resp))) +} + +func testSignRequest(t testing.TB, req request, body internalproto.Message, meta **session.RequestMetaHeader, verif **session.RequestVerificationHeader) { + require.Error(t, neofscrypto.VerifyRequest(req, body)) + + key1, err := keys.NewPrivateKey() + require.NoError(t, err) + key2, err := keys.NewPrivateKey() + require.NoError(t, err) + key3, err := keys.NewPrivateKey() + require.NoError(t, err) + + signers := []neofscrypto.Signer{ + neofsecdsa.Signer(key1.PrivateKey), + neofsecdsa.SignerRFC6979(key2.PrivateKey), + neofsecdsa.SignerWalletConnect(key3.PrivateKey), + } + + for i := range signers { + n := 100*i + 1 + *meta = &session.RequestMetaHeader{ + Version: &refs.Version{Major: uint32(n), Minor: uint32(n + 1)}, + Epoch: uint64(n + 3), + Ttl: uint32(n + 4), + MagicNumber: uint64(n + 5), + XHeaders: []*session.XHeader{ + {Key: "xheader_key" + strconv.Itoa(n), Value: "xheader_val" + strconv.Itoa(n)}, + {Key: "xheader_key" + strconv.Itoa(n+1), Value: "xheader_val" + strconv.Itoa(n+1)}, + }, + Origin: *meta, + } + + *verif, err = neofscrypto.SignRequest(signers[i], req, body, nil) + require.NoError(t, err) + } + + require.NoError(t, neofscrypto.VerifyRequest(req, body)) +} + +func testSignResponse(t testing.TB, resp response, body internalproto.Message, meta **session.ResponseMetaHeader, verif **session.ResponseVerificationHeader) { + require.Error(t, neofscrypto.VerifyResponse(resp, body)) + + key1, err := keys.NewPrivateKey() + require.NoError(t, err) + key2, err := keys.NewPrivateKey() + require.NoError(t, err) + key3, err := 
keys.NewPrivateKey() + require.NoError(t, err) + + signers := []neofscrypto.Signer{ + neofsecdsa.Signer(key1.PrivateKey), + neofsecdsa.SignerRFC6979(key2.PrivateKey), + neofsecdsa.SignerWalletConnect(key3.PrivateKey), + } + + for i := range signers { + n := 100*i + 1 + *meta = &session.ResponseMetaHeader{ + Version: &refs.Version{Major: uint32(n), Minor: uint32(n + 1)}, + Epoch: uint64(n + 3), + Ttl: uint32(n + 4), + XHeaders: []*session.XHeader{ + {Key: "xheader_key" + strconv.Itoa(n), Value: "xheader_val" + strconv.Itoa(n)}, + {Key: "xheader_key" + strconv.Itoa(n+1), Value: "xheader_val" + strconv.Itoa(n+1)}, + }, + Origin: *meta, + Status: &status.Status{Code: uint32(n + 6)}, + } + + *verif, err = neofscrypto.SignResponse(signers[i], resp, body, nil) + require.NoError(t, err) + } + + require.NoError(t, neofscrypto.VerifyResponse(resp, body)) +} + +func TestAPIVerify(t *testing.T) { + t.Run("accounting", func(t *testing.T) { + t.Run("balance", func(t *testing.T) { + t.Run("request", func(t *testing.T) { + verifyRequestFromFile(t, "accounting_balance_request", new(accounting.BalanceRequest), func(r request) internalproto.Message { + return r.(*accounting.BalanceRequest).Body + }) + }) + t.Run("response", func(t *testing.T) { + verifyResponseFromFile(t, "accounting_balance_response", new(accounting.BalanceResponse), func(r response) internalproto.Message { + return r.(*accounting.BalanceResponse).Body + }) + }) + }) + }) + t.Run("container", func(t *testing.T) { + t.Run("put", func(t *testing.T) { + t.Run("request", func(t *testing.T) { + verifyRequestFromFile(t, "container_put_request", new(container.PutRequest), func(r request) internalproto.Message { + return r.(*container.PutRequest).Body + }) + }) + t.Run("response", func(t *testing.T) { + verifyResponseFromFile(t, "container_put_response", new(container.PutResponse), func(r response) internalproto.Message { + return r.(*container.PutResponse).Body + }) + }) + }) + t.Run("get", func(t *testing.T) { + 
t.Run("request", func(t *testing.T) { + verifyRequestFromFile(t, "container_get_request", new(container.GetRequest), func(r request) internalproto.Message { + return r.(*container.GetRequest).Body + }) + }) + t.Run("response", func(t *testing.T) { + verifyResponseFromFile(t, "container_get_response", new(container.GetResponse), func(r response) internalproto.Message { + return r.(*container.GetResponse).Body + }) + }) + }) + t.Run("delete", func(t *testing.T) { + t.Run("request", func(t *testing.T) { + verifyRequestFromFile(t, "container_delete_request", new(container.DeleteRequest), func(r request) internalproto.Message { + return r.(*container.DeleteRequest).Body + }) + }) + t.Run("response", func(t *testing.T) { + verifyResponseFromFile(t, "container_delete_response", new(container.DeleteResponse), func(r response) internalproto.Message { + return r.(*container.DeleteResponse).Body + }) + }) + }) + t.Run("list", func(t *testing.T) { + t.Run("request", func(t *testing.T) { + verifyRequestFromFile(t, "container_list_request", new(container.ListRequest), func(r request) internalproto.Message { + return r.(*container.ListRequest).Body + }) + }) + t.Run("response", func(t *testing.T) { + verifyResponseFromFile(t, "container_list_response", new(container.ListResponse), func(r response) internalproto.Message { + return r.(*container.ListResponse).Body + }) + }) + }) + }) +} + +func TestAPISign(t *testing.T) { + t.Run("accounting", func(t *testing.T) { + t.Run("balance", func(t *testing.T) { + t.Run("request", func(t *testing.T) { + req := &accounting.BalanceRequest{Body: &accounting.BalanceRequest_Body{ + OwnerId: &refs.OwnerID{Value: []byte("any_user")}, + }} + testSignRequest(t, req, req.Body, &req.MetaHeader, &req.VerifyHeader) + }) + t.Run("response", func(t *testing.T) { + req := &accounting.BalanceResponse{Body: &accounting.BalanceResponse_Body{ + Balance: &accounting.Decimal{Value: 1, Precision: 2}, + }} + testSignResponse(t, req, req.Body, &req.MetaHeader, 
&req.VerifyHeader) + }) + }) + }) + t.Run("container", func(t *testing.T) { + t.Run("put", func(t *testing.T) { + t.Run("request", func(t *testing.T) { + req := &container.PutRequest{Body: &container.PutRequest_Body{ + Container: &container.Container{ + Version: &refs.Version{Major: 1, Minor: 2}, + OwnerId: &refs.OwnerID{Value: []byte("any_user")}, + Nonce: []byte("any_nonce"), + BasicAcl: 3, + Attributes: []*container.Container_Attribute{ + {Key: "attr_key1", Value: "attr_val1"}, + {Key: "attr_key2", Value: "attr_val2"}, + }, + PlacementPolicy: &netmap.PlacementPolicy{ + Replicas: []*netmap.Replica{{Count: 4}}, + ContainerBackupFactor: 5, + }, + }, + Signature: &refs.SignatureRFC6979{ + Key: []byte("any_public_key"), + Sign: []byte("any_signature"), + }, + }} + testSignRequest(t, req, req.Body, &req.MetaHeader, &req.VerifyHeader) + }) + t.Run("response", func(t *testing.T) { + req := &container.PutResponse{Body: &container.PutResponse_Body{ + ContainerId: &refs.ContainerID{Value: []byte("any_container")}, + }} + testSignResponse(t, req, req.Body, &req.MetaHeader, &req.VerifyHeader) + }) + }) + t.Run("get", func(t *testing.T) { + t.Run("request", func(t *testing.T) { + req := &container.GetRequest{Body: &container.GetRequest_Body{ + ContainerId: &refs.ContainerID{Value: []byte("any_container_id")}, + }} + testSignRequest(t, req, req.Body, &req.MetaHeader, &req.VerifyHeader) + }) + t.Run("response", func(t *testing.T) { + req := &container.GetResponse{Body: &container.GetResponse_Body{ + Container: &container.Container{ + Version: &refs.Version{Major: 1, Minor: 2}, + OwnerId: &refs.OwnerID{Value: []byte("any_user")}, + Nonce: []byte("any_nonce"), + BasicAcl: 3, + Attributes: []*container.Container_Attribute{ + {Key: "attr_key1", Value: "attr_val1"}, + {Key: "attr_key2", Value: "attr_val2"}, + }, + PlacementPolicy: &netmap.PlacementPolicy{ + Replicas: []*netmap.Replica{{Count: 4}}, + ContainerBackupFactor: 5, + }, + }, + Signature: &refs.SignatureRFC6979{ + Key: 
[]byte("any_public_key"), + Sign: []byte("any_signature"), + }, + SessionToken: &session.SessionToken{ + Body: &session.SessionToken_Body{ + Id: []byte("any_ID"), + OwnerId: &refs.OwnerID{Value: []byte("any_user")}, + Lifetime: &session.SessionToken_Body_TokenLifetime{Exp: 101, Nbf: 102, Iat: 103}, + SessionKey: []byte("any_session_key"), + Context: &session.SessionToken_Body_Container{Container: &session.ContainerSessionContext{ + Verb: 200, + Wildcard: true, + ContainerId: &refs.ContainerID{Value: []byte("any_container")}, + }}, + }, + Signature: &refs.Signature{ + Key: []byte("any_public_key"), + Sign: []byte("any_signature"), + Scheme: 123, + }, + }, + }} + testSignResponse(t, req, req.Body, &req.MetaHeader, &req.VerifyHeader) + }) + }) + t.Run("delete", func(t *testing.T) { + t.Run("request", func(t *testing.T) { + req := &container.DeleteRequest{Body: &container.DeleteRequest_Body{ + ContainerId: &refs.ContainerID{Value: []byte("any_container_id")}, + Signature: &refs.SignatureRFC6979{ + Key: []byte("any_public_key"), + Sign: []byte("any_signature"), + }, + }} + testSignRequest(t, req, req.Body, &req.MetaHeader, &req.VerifyHeader) + }) + t.Run("response", func(t *testing.T) { + req := &container.DeleteResponse{Body: &container.DeleteResponse_Body{}} + testSignResponse(t, req, req.Body, &req.MetaHeader, &req.VerifyHeader) + }) + }) + t.Run("list", func(t *testing.T) { + t.Run("request", func(t *testing.T) { + req := &container.ListRequest{Body: &container.ListRequest_Body{ + OwnerId: &refs.OwnerID{Value: []byte("any_user")}, + }} + testSignRequest(t, req, req.Body, &req.MetaHeader, &req.VerifyHeader) + }) + t.Run("response", func(t *testing.T) { + req := &container.ListResponse{Body: &container.ListResponse_Body{ + ContainerIds: []*refs.ContainerID{ + {Value: []byte("any_container1")}, + {Value: []byte("any_container2")}, + }, + }} + testSignResponse(t, req, req.Body, &req.MetaHeader, &req.VerifyHeader) + }) + }) + }) +} diff --git a/crypto/crypto_test.go 
b/crypto/crypto_test.go index 4549cc55e..2077a73e1 100644 --- a/crypto/crypto_test.go +++ b/crypto/crypto_test.go @@ -5,12 +5,14 @@ import ( "testing" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neofs-api-go/v2/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" neofsecdsa "github.com/nspcc-dev/neofs-sdk-go/crypto/ecdsa" "github.com/stretchr/testify/require" ) +const testDataDir = "testdata" + func TestSignature(t *testing.T) { data := make([]byte, 512) //nolint:staticcheck @@ -40,7 +42,7 @@ func TestSignature(t *testing.T) { s.WriteToV2(&m) - require.NoError(t, s.ReadFromV2(m)) + require.NoError(t, s.ReadFromV2(&m)) valid := s.Verify(data) require.True(t, valid, "type %T", signer) diff --git a/crypto/ecdsa/wallet_connect.go b/crypto/ecdsa/wallet_connect.go index 5c46f69ad..c65826a16 100644 --- a/crypto/ecdsa/wallet_connect.go +++ b/crypto/ecdsa/wallet_connect.go @@ -3,14 +3,20 @@ package neofsecdsa import ( "crypto/ecdsa" "crypto/elliptic" + "crypto/rand" + "crypto/sha256" "encoding/base64" + "encoding/hex" "fmt" + "math/big" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neofs-api-go/v2/util/signature/walletconnect" + "github.com/nspcc-dev/neo-go/pkg/io" neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" ) +const saltLen = 16 + // SignerWalletConnect is similar to SignerRFC6979 with 2 changes: // 1. The data is base64 encoded before signing/verifying. // 2. The signature is a concatenation of the signature itself and 16-byte salt. 
@@ -29,7 +35,16 @@ func (x SignerWalletConnect) Scheme() neofscrypto.Scheme { func (x SignerWalletConnect) Sign(data []byte) ([]byte, error) { b64 := make([]byte, base64.StdEncoding.EncodedLen(len(data))) base64.StdEncoding.Encode(b64, data) - return walletconnect.Sign((*ecdsa.PrivateKey)(&x), b64) + var salt [saltLen]byte + _, err := rand.Read(salt[:]) + if err != nil { + return nil, fmt.Errorf("randomize salt: %w", err) + } + sig, err := SignerRFC6979(x).Sign(saltMessageWalletConnect(b64, salt[:])) + if err != nil { + return nil, err + } + return append(sig, salt[:]...), nil } // Public initializes PublicKey and returns it as neofscrypto.PublicKey. @@ -79,7 +94,41 @@ func (x *PublicKeyWalletConnect) Decode(data []byte) error { // Verify verifies data signature calculated by ECDSA algorithm with SHA-512 hashing. func (x PublicKeyWalletConnect) Verify(data, signature []byte) bool { + if len(signature) != keys.SignatureLen+saltLen { + return false + } b64 := make([]byte, base64.StdEncoding.EncodedLen(len(data))) base64.StdEncoding.Encode(b64, data) - return walletconnect.Verify((*ecdsa.PublicKey)(&x), b64, signature) + return verifyWalletConnect((*ecdsa.PublicKey)(&x), b64, signature[:keys.SignatureLen], signature[keys.SignatureLen:]) +} + +func verifyWalletConnect(key *ecdsa.PublicKey, data, sig, salt []byte) bool { + h := sha256.Sum256(saltMessageWalletConnect(data, salt)) + var r, s big.Int + r.SetBytes(sig[:keys.SignatureLen/2]) + s.SetBytes(sig[keys.SignatureLen/2:]) + return ecdsa.Verify(key, h[:], &r, &s) +} + +// saltMessageWalletConnect calculates signed message for given data and salt +// according to WalletConnect. 
+func saltMessageWalletConnect(data, salt []byte) []byte { + saltedLen := hex.EncodedLen(len(salt)) + len(data) + b := make([]byte, 4+getVarIntSize(saltedLen)+saltedLen+2) + b[0], b[1], b[2], b[3] = 0x01, 0x00, 0x01, 0xf0 + n := 4 + io.PutVarUint(b[4:], uint64(saltedLen)) + n += hex.Encode(b[n:], salt) + n += copy(b[n:], data) + b[n], b[n+1] = 0x00, 0x00 + return b +} + +// copy-paste from neo-go. +func getVarIntSize(value int) int { + if value < 0xFD { + return 1 + } else if value <= 0xFFFF { + return 3 + } + return 5 } diff --git a/crypto/ecdsa/wallet_connect_test.go b/crypto/ecdsa/wallet_connect_test.go new file mode 100644 index 000000000..b715214f5 --- /dev/null +++ b/crypto/ecdsa/wallet_connect_test.go @@ -0,0 +1,83 @@ +package neofsecdsa + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "encoding/hex" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestSignerWalletConnect_Sign(t *testing.T) { + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err) + signer := (*SignerWalletConnect)(key) + + data := []byte("some_message") + sig, err := signer.Sign(data) + require.NoError(t, err) + require.Len(t, sig, 64+16) + require.True(t, signer.Public().Verify(data, sig)) +} + +func TestPublicKey_Verify(t *testing.T) { + const data = "Hello, world!" 
+ const pubKeyHex = "02b6027dffdc35e66d4469ae68c8382a8eb7e40b4dd01d812b0470a7d4864fb858" + const sigHex = "ae6ea59b2028f696d9c2626aba9677800a0fcd5441aa7f66d085c9c070de61faa11d6002e821d4407bb8b8690571d6ac7e79706510d6c89175705d156a234c53d3af772b1846f96034fd4fa740744553" + + b, err := hex.DecodeString(pubKeyHex) + require.NoError(t, err) + + var pubKey PublicKeyWalletConnect + require.NoError(t, pubKey.Decode(b)) + + sig, err := hex.DecodeString(sigHex) + require.NoError(t, err) + + require.True(t, pubKey.Verify([]byte(data), sig)) +} + +func TestVerifyWalletConnect(t *testing.T) { + for _, testCase := range []struct { + msgHex string + pubKeyHex string + sigHex string + saltHex string + }{ + { // Test values from this GIF https://github.com/CityOfZion/neon-wallet/pull/2390 . + msgHex: "436172616c686f2c206d756c65712c206f2062616775697520656820697373756d65726d6f2074616978206c696761646f206e61206d697373e36f3f", + pubKeyHex: "02ce6228ba2cb2fc235be93aff9cd5fc0851702eb9791552f60db062f01e3d83f6", + saltHex: "d41e348afccc2f3ee45cd9f5128b16dc", + sigHex: "90ab1886ca0bece59b982d9ade8f5598065d651362fb9ce45ad66d0474b89c0b80913c8f0118a282acbdf200a429ba2d81bc52534a53ab41a2c6dfe2f0b4fb1b", + }, + { // Test value from wallet connect integration test + msgHex: "313233343536", // ascii string "123456" + pubKeyHex: "03bd9108c0b49f657e9eee50d1399022bd1e436118e5b7529a1b7cd606652f578f", + saltHex: "2c5b189569e92cce12e1c640f23e83ba", + sigHex: "510caa8cb6db5dedf04d215a064208d64be7496916d890df59aee132db8f2b07532e06f7ea664c4a99e3bcb74b43a35eb9653891b5f8701d2aef9e7526703eaa", + }, + { // Test value from wallet connect integration test + msgHex: "", + pubKeyHex: "03bd9108c0b49f657e9eee50d1399022bd1e436118e5b7529a1b7cd606652f578f", + saltHex: "58c86b2e74215b4f36b47d731236be3b", + sigHex: "1e13f248962d8b3b60708b55ddf448d6d6a28c6b43887212a38b00bf6bab695e61261e54451c6e3d5f1f000e5534d166c7ca30f662a296d3a9aafa6d8c173c01", + }, + } { + pubKeyBin, err := hex.DecodeString(testCase.pubKeyHex) + 
require.NoError(t, err) + sig, err := hex.DecodeString(testCase.sigHex) + require.NoError(t, err) + salt, err := hex.DecodeString(testCase.saltHex) + require.NoError(t, err) + msg, err := hex.DecodeString(testCase.msgHex) + require.NoError(t, err) + + pubKey := ecdsa.PublicKey{Curve: elliptic.P256()} + pubKey.X, pubKey.Y = elliptic.UnmarshalCompressed(pubKey.Curve, pubKeyBin) + require.NotNil(t, pubKey.X) + require.True(t, verifyWalletConnect(&pubKey, msg, sig, salt), testCase) + } + +} diff --git a/crypto/example_test.go b/crypto/example_test.go index 38cfcf74d..ddf6b1f85 100644 --- a/crypto/example_test.go +++ b/crypto/example_test.go @@ -1,7 +1,7 @@ package neofscrypto_test import ( - "github.com/nspcc-dev/neofs-api-go/v2/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" ) @@ -28,7 +28,7 @@ func ExampleSignature_Verify() { // Instances can be also used to process NeoFS API V2 protocol messages with [https://github.com/nspcc-dev/neofs-api] package. func ExampleSignature_marshalling() { - // import "github.com/nspcc-dev/neofs-api-go/v2/refs" + // import "github.com/nspcc-dev/neofs-sdk-go/api/refs" // On the client side. @@ -39,5 +39,5 @@ func ExampleSignature_marshalling() { // On the server side. - _ = sig.ReadFromV2(msg) + _ = sig.ReadFromV2(&msg) } diff --git a/crypto/signature.go b/crypto/signature.go index 28ec23c03..bc320b5b5 100644 --- a/crypto/signature.go +++ b/crypto/signature.go @@ -1,10 +1,11 @@ package neofscrypto import ( + "bytes" "errors" "fmt" - "github.com/nspcc-dev/neofs-api-go/v2/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" ) // StablyMarshallable describes structs which can be marshalled transparently. @@ -16,13 +17,17 @@ type StablyMarshallable interface { // Signature represents a confirmation of data integrity received by the // digital signature mechanism. 
// -// Signature is mutually compatible with github.com/nspcc-dev/neofs-api-go/v2/refs.Signature +// Signature is mutually compatible with github.com/nspcc-dev/neofs-sdk-go/api/refs.Signature // message. See ReadFromV2 / WriteToV2 methods. // // Note that direct typecast is not safe and may result in loss of compatibility: // // _ = Signature(refs.Signature{}) // not recommended -type Signature refs.Signature +type Signature struct { + scheme Scheme + pubKey []byte + val []byte +} // NewSignature is a Signature instance constructor. func NewSignature(scheme Scheme, publicKey PublicKey, value []byte) Signature { @@ -31,11 +36,18 @@ func NewSignature(scheme Scheme, publicKey PublicKey, value []byte) Signature { return s } +// CopyTo writes deep copy of the Signature to dst. +func (x Signature) CopyTo(dst *Signature) { + dst.scheme = x.scheme + dst.pubKey = bytes.Clone(x.pubKey) + dst.val = bytes.Clone(x.val) +} + // ReadFromV2 reads Signature from the refs.Signature message. Checks if the // message conforms to NeoFS API V2 protocol. // // See also WriteToV2. -func (x *Signature) ReadFromV2(m refs.Signature) error { +func (x *Signature) ReadFromV2(m *refs.Signature) error { bPubKey := m.GetKey() if len(bPubKey) == 0 { return errors.New("missing public key") @@ -46,12 +58,14 @@ func (x *Signature) ReadFromV2(m refs.Signature) error { return errors.New("missing signature") } - _, err := decodePublicKey(m) + _, err := decodePublicKey(Scheme(m.Scheme), m.Key) if err != nil { return err } - *x = Signature(m) + x.scheme = Scheme(m.Scheme) + x.pubKey = m.Key + x.val = m.Sign return nil } @@ -61,7 +75,9 @@ func (x *Signature) ReadFromV2(m refs.Signature) error { // // See also ReadFromV2. 
func (x Signature) WriteToV2(m *refs.Signature) { - *m = refs.Signature(x) + m.Scheme = refs.SignatureScheme(x.scheme) + m.Key = x.pubKey + m.Sign = x.val } // Calculate signs data using Signer and encodes public key for subsequent @@ -115,11 +131,9 @@ func (x *Signature) CalculateMarshalled(signer Signer, obj StablyMarshallable, b // // See also Calculate. func (x Signature) Verify(data []byte) bool { - m := refs.Signature(x) + key, err := decodePublicKey(x.scheme, x.pubKey) - key, err := decodePublicKey(m) - - return err == nil && key.Verify(data, m.GetSign()) + return err == nil && key.Verify(data, x.val) } func (x *Signature) fillSignature(signer Signer, signature []byte) { @@ -127,10 +141,9 @@ func (x *Signature) fillSignature(signer Signer, signature []byte) { } func (x *Signature) setFields(scheme Scheme, publicKey PublicKey, value []byte) { - m := (*refs.Signature)(x) - m.SetScheme(refs.SignatureScheme(scheme)) - m.SetSign(value) - m.SetKey(PublicKeyBytes(publicKey)) + x.scheme = scheme + x.val = value + x.pubKey = PublicKeyBytes(publicKey) } // Scheme returns signature scheme used by signer to calculate the signature. @@ -138,7 +151,7 @@ func (x *Signature) setFields(scheme Scheme, publicKey PublicKey, value []byte) // Scheme MUST NOT be called before [NewSignature], [Signature.ReadFromV2] or // [Signature.Calculate] methods. func (x Signature) Scheme() Scheme { - return Scheme((*refs.Signature)(&x).GetScheme()) + return x.scheme } // PublicKey returns public key of the signer which calculated the signature. @@ -148,7 +161,7 @@ func (x Signature) Scheme() Scheme { // // See also [Signature.PublicKeyBytes]. func (x Signature) PublicKey() PublicKey { - key, _ := decodePublicKey(refs.Signature(x)) + key, _ := decodePublicKey(x.scheme, x.pubKey) return key } @@ -163,7 +176,7 @@ func (x Signature) PublicKey() PublicKey { // // See also [Signature.PublicKey]. 
func (x Signature) PublicKeyBytes() []byte { - return (*refs.Signature)(&x).GetKey() + return x.pubKey } // Value returns calculated digital signature. @@ -174,12 +187,10 @@ func (x Signature) PublicKeyBytes() []byte { // Value MUST NOT be called before [NewSignature], [Signature.ReadFromV2] or // [Signature.Calculate] methods. func (x Signature) Value() []byte { - return (*refs.Signature)(&x).GetSign() + return x.val } -func decodePublicKey(m refs.Signature) (PublicKey, error) { - scheme := Scheme(m.GetScheme()) - +func decodePublicKey(scheme Scheme, b []byte) (PublicKey, error) { newPubKey, ok := publicKeys[scheme] if !ok { return nil, fmt.Errorf("unsupported scheme %d", scheme) @@ -187,7 +198,7 @@ func decodePublicKey(m refs.Signature) (PublicKey, error) { pubKey := newPubKey() - err := pubKey.Decode(m.GetKey()) + err := pubKey.Decode(b) if err != nil { return nil, fmt.Errorf("decode public key from binary: %w", err) } diff --git a/crypto/signature_test.go b/crypto/signature_test.go index 6e01fbe9d..86b519828 100644 --- a/crypto/signature_test.go +++ b/crypto/signature_test.go @@ -3,17 +3,18 @@ package neofscrypto_test import ( "testing" - "github.com/nspcc-dev/neofs-api-go/v2/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" - "github.com/nspcc-dev/neofs-sdk-go/crypto/test" + neofscryptotest "github.com/nspcc-dev/neofs-sdk-go/crypto/test" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" ) const anyUnsupportedScheme = neofscrypto.ECDSA_WALLETCONNECT + 1 func TestSignatureLifecycle(t *testing.T) { data := []byte("Hello, world!") - signer := test.RandomSigner(t) + signer := neofscryptotest.RandomSigner() scheme := signer.Scheme() pubKey := signer.Public() bPubKey := neofscrypto.PublicKeyBytes(pubKey) @@ -33,34 +34,34 @@ func TestSignatureLifecycle(t *testing.T) { testSig(clientSig) - var sigV2 refs.Signature - clientSig.WriteToV2(&sigV2) + var apiSig refs.Signature + 
clientSig.WriteToV2(&apiSig) - require.Equal(t, refs.SignatureScheme(scheme), sigV2.GetScheme()) - require.Equal(t, bPubKey, sigV2.GetKey()) - require.Equal(t, clientSig.Value(), sigV2.GetSign()) + require.Equal(t, refs.SignatureScheme(scheme), apiSig.GetScheme()) + require.Equal(t, bPubKey, apiSig.GetKey()) + require.Equal(t, clientSig.Value(), apiSig.GetSign()) - // sigV2 transmitted to server over the network + // apiSig transmitted to server over the network var serverSig neofscrypto.Signature - err = serverSig.ReadFromV2(sigV2) + err = serverSig.ReadFromV2(&apiSig) require.NoError(t, err) testSig(serverSig) // break the message in different ways for i, breakSig := range []func(*refs.Signature){ - func(sigV2 *refs.Signature) { sigV2.SetScheme(refs.SignatureScheme(anyUnsupportedScheme)) }, - func(sigV2 *refs.Signature) { - key := sigV2.GetKey() - sigV2.SetKey(key[:len(key)-1]) + func(apiSig *refs.Signature) { apiSig.Scheme = refs.SignatureScheme(anyUnsupportedScheme) }, + func(apiSig *refs.Signature) { + key := apiSig.GetKey() + apiSig.Key = key[:len(key)-1] }, - func(sigV2 *refs.Signature) { sigV2.SetKey(append(sigV2.GetKey(), 1)) }, - func(sigV2 *refs.Signature) { sigV2.SetSign(nil) }, + func(apiSig *refs.Signature) { apiSig.Key = append(apiSig.GetKey(), 1) }, + func(apiSig *refs.Signature) { apiSig.Sign = nil }, } { - sigV2Cp := sigV2 - breakSig(&sigV2Cp) + sigV2Cp := proto.Clone(&apiSig).(*refs.Signature) + breakSig(sigV2Cp) err = serverSig.ReadFromV2(sigV2Cp) require.Errorf(t, err, "break func #%d", i) @@ -68,7 +69,7 @@ func TestSignatureLifecycle(t *testing.T) { } func TestNewSignature(t *testing.T) { - signer := test.RandomSigner(t) + signer := neofscryptotest.RandomSigner() scheme := signer.Scheme() pubKey := signer.Public() val := []byte("Hello, world!") // may be any for this test @@ -89,7 +90,7 @@ func TestNewSignature(t *testing.T) { var sig2 neofscrypto.Signature - err := sig2.ReadFromV2(sigMsg) + err := sig2.ReadFromV2(&sigMsg) require.NoError(t, 
err) checkFields(sig2) diff --git a/crypto/signer.go b/crypto/signer.go index 2fbe6ba1a..fa50d8098 100644 --- a/crypto/signer.go +++ b/crypto/signer.go @@ -4,7 +4,7 @@ import ( "errors" "fmt" - "github.com/nspcc-dev/neofs-api-go/v2/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" ) // ErrIncorrectSigner is returned from function when the signer passed to it @@ -16,15 +16,13 @@ var ErrIncorrectSigner = errors.New("incorrect signer") // Scheme represents digital signature algorithm with fixed cryptographic hash function. // // Negative values are reserved and depend on context (e.g. unsupported scheme). -type Scheme int32 +type Scheme uint32 //nolint:revive const ( - _ Scheme = iota - 1 - - ECDSA_SHA512 // ECDSA with SHA-512 hashing (FIPS 186-3) - ECDSA_DETERMINISTIC_SHA256 // Deterministic ECDSA with SHA-256 hashing (RFC 6979) - ECDSA_WALLETCONNECT // Wallet Connect signature scheme + ECDSA_SHA512 Scheme = iota // ECDSA with SHA-512 hashing (FIPS 186-3) + ECDSA_DETERMINISTIC_SHA256 // Deterministic ECDSA with SHA-256 hashing (RFC 6979) + ECDSA_WALLETCONNECT // Wallet Connect signature scheme ) // String implements fmt.Stringer. diff --git a/crypto/test/generate.go b/crypto/test/generate.go new file mode 100644 index 000000000..e3b3d59f2 --- /dev/null +++ b/crypto/test/generate.go @@ -0,0 +1,54 @@ +/* +Package neofscryptotest provides special help functions for testing NeoFS API and its environment. +*/ +package neofscryptotest + +import ( + "crypto/ecdsa" + "errors" + "fmt" + "math/rand" + + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" + neofsecdsa "github.com/nspcc-dev/neofs-sdk-go/crypto/ecdsa" +) + +// ECDSAPrivateKey returns random ECDSA private key. 
+func ECDSAPrivateKey() ecdsa.PrivateKey { + p, err := keys.NewPrivateKey() + if err != nil { + panic(fmt.Errorf("unexpected private key randomizaiton failure: %w", err)) + } + return p.PrivateKey +} + +// TODO: drop Random, other functions don't write it +// RandomSigner returns random signer. +func RandomSigner() neofscrypto.Signer { + return neofsecdsa.Signer(ECDSAPrivateKey()) +} + +// RandomSignerRFC6979 returns random signer with +// [neofscrypto.ECDSA_DETERMINISTIC_SHA256] scheme. +func RandomSignerRFC6979() neofscrypto.Signer { + return neofsecdsa.SignerRFC6979(ECDSAPrivateKey()) +} + +type failedSigner struct { + neofscrypto.Signer +} + +func (x failedSigner) Sign([]byte) ([]byte, error) { return nil, errors.New("failed to sign") } + +// FailSigner wraps s to always return error from Sign method. +func FailSigner(s neofscrypto.Signer) neofscrypto.Signer { + return failedSigner{s} +} + +// Signature returns random neofscrypto.Signature. +func Signature() neofscrypto.Signature { + sig := make([]byte, 64) + rand.Read(sig) + return neofscrypto.NewSignature(neofscrypto.Scheme(rand.Uint32()%3), RandomSigner().Public(), sig) +} diff --git a/crypto/test/generate_test.go b/crypto/test/generate_test.go new file mode 100644 index 000000000..b9a81b262 --- /dev/null +++ b/crypto/test/generate_test.go @@ -0,0 +1,21 @@ +package neofscryptotest_test + +import ( + "testing" + + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" + neofscryptotest "github.com/nspcc-dev/neofs-sdk-go/crypto/test" + "github.com/stretchr/testify/require" +) + +func TestSignature(t *testing.T) { + v := neofscryptotest.Signature() + require.NotEqual(t, v, neofscryptotest.Signature()) + + var m refs.Signature + v.WriteToV2(&m) + var v2 neofscrypto.Signature + require.NoError(t, v2.ReadFromV2(&m)) + require.Equal(t, v, v2) +} diff --git a/crypto/test/tests.go b/crypto/test/tests.go deleted file mode 100644 index f63555331..000000000 --- 
a/crypto/test/tests.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Package tests provides special help functions for testing NeoFS API and its environment. - -All functions accepting `t *testing.T` that emphasize there are only for tests purposes. -*/ -package test - -import ( - "testing" - - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" - neofsecdsa "github.com/nspcc-dev/neofs-sdk-go/crypto/ecdsa" - "github.com/nspcc-dev/neofs-sdk-go/user" - "github.com/stretchr/testify/require" -) - -// RandomSigner return neofscrypto.Signer ONLY for TESTs purposes. -// It may be used like helper to get new neofscrypto.Signer if you need it in yours tests. -func RandomSigner(tb testing.TB) neofscrypto.Signer { - p, err := keys.NewPrivateKey() - require.NoError(tb, err) - - return neofsecdsa.Signer(p.PrivateKey) -} - -// RandomSignerRFC6979 return [user.Signer] ONLY for TESTs purposes. -// It may be used like helper to get new [user.Signer] if you need it in yours tests. -func RandomSignerRFC6979(tb testing.TB) user.Signer { - p, err := keys.NewPrivateKey() - require.NoError(tb, err) - - return user.NewAutoIDSignerRFC6979(p.PrivateKey) -} - -// SignedComponent describes component which can signed and the signature may be verified. -type SignedComponent interface { - SignedData() []byte - Sign(neofscrypto.Signer) error - VerifySignature() bool -} - -// SignedComponentUserSigner is the same as [SignedComponent] but uses [user.Signer] instead of [neofscrypto.Signer]. -// It helps to cover all cases. -type SignedComponentUserSigner interface { - SignedData() []byte - Sign(user.Signer) error - VerifySignature() bool -} - -// SignedDataComponent tests [SignedComponent] for valid data generation by SignedData function. 
-func SignedDataComponent(tb testing.TB, signer neofscrypto.Signer, cmp SignedComponent) { - data := cmp.SignedData() - - sig, err := signer.Sign(data) - require.NoError(tb, err) - - static := neofscrypto.NewStaticSigner(signer.Scheme(), sig, signer.Public()) - - err = cmp.Sign(static) - require.NoError(tb, err) - - require.True(tb, cmp.VerifySignature()) -} - -// SignedDataComponentUser tests [SignedComponentUserSigner] for valid data generation by SignedData function. -func SignedDataComponentUser(tb testing.TB, signer user.Signer, cmp SignedComponentUserSigner) { - data := cmp.SignedData() - - sig, err := signer.Sign(data) - require.NoError(tb, err) - - static := neofscrypto.NewStaticSigner(signer.Scheme(), sig, signer.Public()) - - err = cmp.Sign(user.NewSigner(static, signer.UserID())) - require.NoError(tb, err) - - require.True(tb, cmp.VerifySignature()) -} diff --git a/crypto/testdata/accounting_balance_request b/crypto/testdata/accounting_balance_request new file mode 100644 index 000000000..0773db661 --- /dev/null +++ b/crypto/testdata/accounting_balance_request @@ -0,0 +1 @@ 
+0a0c0a0a0a08616e795f7573657212ff010a0608c90110ca0110b00218af0222200a0e786865616465725f6b6579333031120e786865616465725f76616c33303122200a0e786865616465725f6b6579333032120e786865616465725f76616c3330323aa7010a0608c90110ca0110cc0118cb0122200a0e786865616465725f6b6579323031120e786865616465725f76616c32303122200a0e786865616465725f6b6579323032120e786865616465725f76616c3230323a500a04086510661068186722200a0e786865616465725f6b6579313031120e786865616465725f76616c31303122200a0e786865616465725f6b6579313032120e786865616465725f76616c313032400540cd0140b1021a820612770a21037710bfe3582c41bce0326fbc7d2a776f3c5fefe383068903c1c3d22a56f54c921250b92988c699cbab9a4d4cdd4a28c1e2380dc537324f42f725976a7d94e01e71a88ea1add5e085b1767e64285025f60501f0273e53cf91b0db24777f6d8ac56eed6834d0b3d086f28730bdddce7335fd8018021a770a21037710bfe3582c41bce0326fbc7d2a776f3c5fefe383068903c1c3d22a56f54c921250c3d8fc6ae82b1dda808ef350eeb7e34f9e316992e87cec7b28cd16713d767f7325b552c8162cfb1fe70921a4d6b04649b5c32a68fd256e4390216102899066c7b14c28bb7419802d5744d2187a8a3fc91802228d0412670a21034e268fd0717614e4e96526d128a50ee6af517dc7a1bc6a4493f7940707001b52124005ce6e2974b937e072dc5753f78e38fb8dbbf9d5eb2cc4d4b0c16f55f84270ccd5dd88e6851f9ab8bcad3381bcf7358969d837c918fad97aa62dc0b7c31bc24718011a670a21034e268fd0717614e4e96526d128a50ee6af517dc7a1bc6a4493f7940707001b521240b35ebb18c95faa123935c378554d5fa82fdf451b4e511cf18c5c2c2feb34f8d680d11c060df16a921336e28653e96162dfbcff58e0e69d3dccad67f8869ed2d5180122b8020a660a210315068c68cca7f30f65fb6bfef343db13333b77c992d0c9231381f5f6337066df12410428acbcc836df64dbf4b3336908abc11f5f77ad84bf1a6efac149f18fca50632cbf4d8dadd75dc0dbbc288ef2706ed08b436799d81c993cd277ba4b095c7cafab12660a210315068c68cca7f30f65fb6bfef343db13333b77c992d0c9231381f5f6337066df1241045eae7facba8eb5664b2a1c9d6cb68b095749a1dd2bab6e11bb3247c3fef11ee12482820f2a2836110e98da657478b5d0d5fa1515ad2889202ddcdba6a3194aa91a660a210315068c68cca7f30f65fb6bfef343db13333b77c992d0c9231381f5f6337066df12410422e8e2e940089d1ac58002264773450e80090
cb107950ba5b795f2fd4cdbac8491a7a1e7e52da558b3cf6a87d8d5f0044cd0919baa14068feb1f5ffdf094dbb2 \ No newline at end of file diff --git a/crypto/testdata/accounting_balance_request.json b/crypto/testdata/accounting_balance_request.json new file mode 100644 index 000000000..ddb7e9d13 --- /dev/null +++ b/crypto/testdata/accounting_balance_request.json @@ -0,0 +1,105 @@ +{ + "body": { + "ownerId": { + "value": "YW55X3VzZXI=" + } + }, + "metaHeader": { + "version": { + "major": 201, + "minor": 202 + }, + "epoch": "304", + "ttl": 303, + "xHeaders": [ + { + "key": "xheader_key301", + "value": "xheader_val301" + }, + { + "key": "xheader_key302", + "value": "xheader_val302" + } + ], + "origin": { + "version": { + "major": 201, + "minor": 202 + }, + "epoch": "204", + "ttl": 203, + "xHeaders": [ + { + "key": "xheader_key201", + "value": "xheader_val201" + }, + { + "key": "xheader_key202", + "value": "xheader_val202" + } + ], + "origin": { + "version": { + "major": 101, + "minor": 102 + }, + "epoch": "104", + "ttl": 103, + "xHeaders": [ + { + "key": "xheader_key101", + "value": "xheader_val101" + }, + { + "key": "xheader_key102", + "value": "xheader_val102" + } + ], + "magicNumber": "5" + }, + "magicNumber": "205" + }, + "magicNumber": "305" + }, + "verifyHeader": { + "metaSignature": { + "key": "A3cQv+NYLEG84DJvvH0qd288X+/jgwaJA8HD0ipW9UyS", + "signature": "uSmIxpnLq5pNTN1KKMHiOA3FNzJPQvcll2p9lOAecaiOoa3V4IWxdn5kKFAl9gUB8Cc+U8+RsNskd39tisVu7Wg00LPQhvKHML3dznM1/YA=", + "scheme": "ECDSA_RFC6979_SHA256_WALLET_CONNECT" + }, + "originSignature": { + "key": "A3cQv+NYLEG84DJvvH0qd288X+/jgwaJA8HD0ipW9UyS", + "signature": "w9j8augrHdqAjvNQ7rfjT54xaZLofOx7KM0WcT12f3MltVLIFiz7H+cJIaTWsEZJtcMqaP0lbkOQIWECiZBmx7FMKLt0GYAtV0TSGHqKP8k=", + "scheme": "ECDSA_RFC6979_SHA256_WALLET_CONNECT" + }, + "origin": { + "metaSignature": { + "key": "A04mj9BxdhTk6WUm0SilDuavUX3HobxqRJP3lAcHABtS", + "signature": "Bc5uKXS5N+By3FdT9444+427+dXrLMTUsMFvVfhCcMzV3YjmhR+auLytM4G89zWJadg3yRj62XqmLcC3wxvCRw==", + 
"scheme": "ECDSA_RFC6979_SHA256" + }, + "originSignature": { + "key": "A04mj9BxdhTk6WUm0SilDuavUX3HobxqRJP3lAcHABtS", + "signature": "s167GMlfqhI5NcN4VU1fqC/fRRtOURzxjFwsL+s0+NaA0RwGDfFqkhM24oZT6WFi37z/WODmnT3MrWf4hp7S1Q==", + "scheme": "ECDSA_RFC6979_SHA256" + }, + "origin": { + "bodySignature": { + "key": "AxUGjGjMp/MPZftr/vND2xMzO3fJktDJIxOB9fYzcGbf", + "signature": "BCisvMg232Tb9LMzaQirwR9fd62Evxpu+sFJ8Y/KUGMsv02NrdddwNu8KI7ycG7Qi0NnmdgcmTzSd7pLCVx8r6s=", + "scheme": "ECDSA_SHA512" + }, + "metaSignature": { + "key": "AxUGjGjMp/MPZftr/vND2xMzO3fJktDJIxOB9fYzcGbf", + "signature": "BF6uf6y6jrVmSyocnWy2iwlXSaHdK6tuEbsyR8P+8R7hJIKCDyooNhEOmNpldHi10NX6FRWtKIkgLdzbpqMZSqk=", + "scheme": "ECDSA_SHA512" + }, + "originSignature": { + "key": "AxUGjGjMp/MPZftr/vND2xMzO3fJktDJIxOB9fYzcGbf", + "signature": "BCLo4ulACJ0axYACJkdzRQ6ACQyxB5ULpbeV8v1M26yEkaeh5+UtpVizz2qH2NXwBEzQkZuqFAaP6x9f/fCU27I=", + "scheme": "ECDSA_SHA512" + } + } + } + } +} diff --git a/crypto/testdata/accounting_balance_response b/crypto/testdata/accounting_balance_response new file mode 100644 index 000000000..fdb82d6fe --- /dev/null +++ b/crypto/testdata/accounting_balance_response @@ -0,0 +1 @@ 
+0a060a04080110021285020a0608c90110ca0110b00218af0222200a0e786865616465725f6b6579333031120e786865616465725f76616c33303122200a0e786865616465725f6b6579333032120e786865616465725f76616c3330322aab010a0608c90110ca0110cc0118cb0122200a0e786865616465725f6b6579323031120e786865616465725f76616c32303122200a0e786865616465725f6b6579323032120e786865616465725f76616c3230322a520a04086510661068186722200a0e786865616465725f6b6579313031120e786865616465725f76616c31303122200a0e786865616465725f6b6579313032120e786865616465725f76616c3130323202086a320308ce01320308b2021a820612770a2102d4cd426ec4a20af51ef5a208cb259a5f77fd7f3996ac301952c5b810365cffc71250f4faef5e5f4a3e8e2becf57f8b9142a912fe1114b4c2b87bd3f0d7ecc2a6ab77db944eaf6e609dae35752a1fddef471456920bca530b3936c527fa0870b91f206a9b4314445430ef9fe66093491af52d18021a770a2102d4cd426ec4a20af51ef5a208cb259a5f77fd7f3996ac301952c5b810365cffc71250275c418648bd69d18cece9450c63f37b5e4ef2e2302f5b0d88c7d1b9c19932aa838323b1f50ea59a1e36a339c9db9533a1f105954b07f8597e1a4ff7aee6bc390b6143b21e04073e8c7aa7fe819d36af1802228d0412670a2103729ee725569a4edbb80e85969e1ea99ba85553886f12ad43eb740569e4234f55124098534a99b89f9670421916c4cde6ddedc8a846ed970158c2f0aafed8cfce64446793db9163cf9ca31eb70468aa1d8bace7181928a345e2620e64e8dc3252a03018011a670a2103729ee725569a4edbb80e85969e1ea99ba85553886f12ad43eb740569e4234f5512400e5e0f72c90a568f055d0dcb965741f2dedc4f7438e4804e5f64f07f5b71fe7036cf9b896b3bc68b8d8d81386cb0b365868db49807362bb36840eea02e22bd2b180122b8020a660a210331822bfb3c19a8d4718819af0430c28d6e4fa29f0d04eee92385c16838efbf2b124104eb4a0eb9c5f5883f55363b1057b5358263fdbdc21859d6768bc6be0869f704b72cc5ebc0ee5fb092b00a676de9172217355f1200f90770d306d22046b6d2ea7112660a210331822bfb3c19a8d4718819af0430c28d6e4fa29f0d04eee92385c16838efbf2b124104457428b27811eb7b900cbebdd2b70d7bd7509d460d41b955f02fc6a64bf9c8710dfa256731baaca1d014a6affb85485210e815914367b1b90753df1af8a4b2921a660a210331822bfb3c19a8d4718819af0430c28d6e4fa29f0d04eee92385c16838efbf2b124104592c0b604ea246611aa307b6fb35fa7d3e3eb
4970e999a1706b22e2800f704216043d44525d7b8a7c0135ba2f32d7fcd443c5b34098d1dd0dd27da68f2b0abe4 \ No newline at end of file diff --git a/crypto/testdata/accounting_balance_response.json b/crypto/testdata/accounting_balance_response.json new file mode 100644 index 000000000..3435853dd --- /dev/null +++ b/crypto/testdata/accounting_balance_response.json @@ -0,0 +1,118 @@ +{ + "body": { + "balance": { + "value": "1", + "precision": 2 + } + }, + "metaHeader": { + "version": { + "major": 201, + "minor": 202 + }, + "epoch": "304", + "ttl": 303, + "xHeaders": [ + { + "key": "xheader_key301", + "value": "xheader_val301" + }, + { + "key": "xheader_key302", + "value": "xheader_val302" + } + ], + "origin": { + "version": { + "major": 201, + "minor": 202 + }, + "epoch": "204", + "ttl": 203, + "xHeaders": [ + { + "key": "xheader_key201", + "value": "xheader_val201" + }, + { + "key": "xheader_key202", + "value": "xheader_val202" + } + ], + "origin": { + "version": { + "major": 101, + "minor": 102 + }, + "epoch": "104", + "ttl": 103, + "xHeaders": [ + { + "key": "xheader_key101", + "value": "xheader_val101" + }, + { + "key": "xheader_key102", + "value": "xheader_val102" + } + ], + "status": { + "code": 106, + "message": "", + "details": [] + } + }, + "status": { + "code": 206, + "message": "", + "details": [] + } + }, + "status": { + "code": 306, + "message": "", + "details": [] + } + }, + "verifyHeader": { + "metaSignature": { + "key": "AtTNQm7Eogr1HvWiCMslml93/X85lqwwGVLFuBA2XP/H", + "signature": "9PrvXl9KPo4r7PV/i5FCqRL+ERS0wrh70/DX7MKmq3fblE6vbmCdrjV1Kh/d70cUVpILylMLOTbFJ/oIcLkfIGqbQxREVDDvn+Zgk0ka9S0=", + "scheme": "ECDSA_RFC6979_SHA256_WALLET_CONNECT" + }, + "originSignature": { + "key": "AtTNQm7Eogr1HvWiCMslml93/X85lqwwGVLFuBA2XP/H", + "signature": "J1xBhki9adGM7OlFDGPze15O8uIwL1sNiMfRucGZMqqDgyOx9Q6lmh42oznJ25UzofEFlUsH+Fl+Gk/3rua8OQthQ7IeBAc+jHqn/oGdNq8=", + "scheme": "ECDSA_RFC6979_SHA256_WALLET_CONNECT" + }, + "origin": { + "metaSignature": { + "key": 
"A3Ke5yVWmk7buA6Flp4eqZuoVVOIbxKtQ+t0BWnkI09V", + "signature": "mFNKmbiflnBCGRbEzebd7cioRu2XAVjC8Kr+2M/OZERnk9uRY8+cox63BGiqHYus5xgZKKNF4mIOZOjcMlKgMA==", + "scheme": "ECDSA_RFC6979_SHA256" + }, + "originSignature": { + "key": "A3Ke5yVWmk7buA6Flp4eqZuoVVOIbxKtQ+t0BWnkI09V", + "signature": "Dl4PcskKVo8FXQ3LlldB8t7cT3Q45IBOX2Twf1tx/nA2z5uJazvGi42NgThssLNlho20mAc2K7NoQO6gLiK9Kw==", + "scheme": "ECDSA_RFC6979_SHA256" + }, + "origin": { + "bodySignature": { + "key": "AzGCK/s8GajUcYgZrwQwwo1uT6KfDQTu6SOFwWg4778r", + "signature": "BOtKDrnF9Yg/VTY7EFe1NYJj/b3CGFnWdovGvghp9wS3LMXrwO5fsJKwCmdt6RciFzVfEgD5B3DTBtIgRrbS6nE=", + "scheme": "ECDSA_SHA512" + }, + "metaSignature": { + "key": "AzGCK/s8GajUcYgZrwQwwo1uT6KfDQTu6SOFwWg4778r", + "signature": "BEV0KLJ4Eet7kAy+vdK3DXvXUJ1GDUG5VfAvxqZL+chxDfolZzG6rKHQFKav+4VIUhDoFZFDZ7G5B1PfGvikspI=", + "scheme": "ECDSA_SHA512" + }, + "originSignature": { + "key": "AzGCK/s8GajUcYgZrwQwwo1uT6KfDQTu6SOFwWg4778r", + "signature": "BFksC2BOokZhGqMHtvs1+n0+PrSXDpmaFwayLigA9wQhYEPURSXXuKfAE1ui8y1/zUQ8WzQJjR3Q3SfaaPKwq+Q=", + "scheme": "ECDSA_SHA512" + } + } + } + } +} \ No newline at end of file diff --git a/crypto/testdata/container_delete_request b/crypto/testdata/container_delete_request new file mode 100644 index 000000000..ccbfc542a --- /dev/null +++ b/crypto/testdata/container_delete_request @@ -0,0 +1 @@ 
+0a320a0f0a0d616e795f636f6e7461696e6572121f0a0e616e795f7075626c69635f6b6579120d616e795f7369676e617475726512ff010a0608c90110ca0110b00218af0222200a0e786865616465725f6b6579333031120e786865616465725f76616c33303122200a0e786865616465725f6b6579333032120e786865616465725f76616c3330323aa7010a0608c90110ca0110cc0118cb0122200a0e786865616465725f6b6579323031120e786865616465725f76616c32303122200a0e786865616465725f6b6579323032120e786865616465725f76616c3230323a500a04086510661068186722200a0e786865616465725f6b6579313031120e786865616465725f76616c31303122200a0e786865616465725f6b6579313032120e786865616465725f76616c313032400540cd0140b1021a820612770a210320e5e25693f53475c32f63b19313f1488467f1385a7d57a933020e67158373f31250968957299b6b9acc2e05a1fb59e272d7ccc9439970c854ecd9805be7775c7cb7159610544ecccd717c7d7ba26ad945c8fa3cebbbb66c2caf1c13e651aa3f3338d1caa410422175b358e9f032f5b38efc18021a770a210320e5e25693f53475c32f63b19313f1488467f1385a7d57a933020e67158373f312508c8765faf8fb59f85c8bb65bd8618a97afcee3f75bb5baab359967be723af9a133e3f4bca264510d90e7450a2faa0ecdbed3e0a41b92574a8c47a08f408e32a24deae3d3227f886921044b4b1f9839491802228d0412670a21033f3041fc93a73df665d0357d84721cdbbdaa40d0d2781a69e2ff29cd6ec29e941240c93564baafc5714e46c6e272946f0e4dcc7ec9d18f4423a202b66c811da38a27f6fcdca9d5f38f66a2b3ab27967559baf118952fbb0a44588120570a277cb88c18011a670a21033f3041fc93a73df665d0357d84721cdbbdaa40d0d2781a69e2ff29cd6ec29e941240e298f66d2b95a7321654d950165bcb1b1b977399add149ad22bf656c197397870574ece3941a597e1847470244cf11cac759c9a4e297b934209ccded8974eea5180122b8020a660a210291fa398b8447d5a5bc0b486e7cd2018df8d28f1ca96f1e138966bb644e418e3c12410418167800fb90e2fe9f3f8c2db45f26686300e43a34162837919bbb71d11e88e6388c9bc0a9edbe7079d9a9f4f2251eae9224fa93e2fc98e44d38f432ea23062d12660a210291fa398b8447d5a5bc0b486e7cd2018df8d28f1ca96f1e138966bb644e418e3c12410439a034a282e6ace2aa8157a18113fa19be554239756da800ba40fce2cc933fac7219212ce8cdd269ed67bbd7cf61966812548ece2c45cc028bf197705cfc7c021a660a210291fa398b8447d5a5bc0b486e7cd2018
df8d28f1ca96f1e138966bb644e418e3c124104303d8eaa90a933f00b95fac2becf5d628c9b34f6ed5aeaf7976b757039b842130297f60cca0cd4150c071e1f964f3314089caff24f5ffc42a47ae07e37aed42f \ No newline at end of file diff --git a/crypto/testdata/container_delete_request.json b/crypto/testdata/container_delete_request.json new file mode 100644 index 000000000..477201b5e --- /dev/null +++ b/crypto/testdata/container_delete_request.json @@ -0,0 +1,106 @@ +{ + "body": { + "containerId": { + "value": "YW55X2NvbnRhaW5lcg==" + }, + "signature": { + "key": "YW55X3B1YmxpY19rZXk=", + "signature": "YW55X3NpZ25hdHVyZQ==" + } + }, + "metaHeader": { + "version": { + "major": 201, + "minor": 202 + }, + "epoch": "304", + "ttl": 303, + "xHeaders": [ + { + "key": "xheader_key301", + "value": "xheader_val301" + }, + { + "key": "xheader_key302", + "value": "xheader_val302" + } + ], + "origin": { + "version": { + "major": 201, + "minor": 202 + }, + "epoch": "204", + "ttl": 203, + "xHeaders": [ + { + "key": "xheader_key201", + "value": "xheader_val201" + }, + { + "key": "xheader_key202", + "value": "xheader_val202" + } + ], + "origin": { + "version": { + "major": 101, + "minor": 102 + }, + "epoch": "104", + "ttl": 103, + "xHeaders": [ + { + "key": "xheader_key101", + "value": "xheader_val101" + }, + { + "key": "xheader_key102", + "value": "xheader_val102" + } + ], + "magicNumber": "5" + }, + "magicNumber": "205" + }, + "magicNumber": "305" + }, + "verifyHeader": { + "metaSignature": { + "key": "AyDl4laT9TR1wy9jsZMT8UiEZ/E4Wn1XqTMCDmcVg3Pz", + "signature": "lolXKZtrmswuBaH7WeJy18zJQ5lwyFTs2YBb53dcfLcVlhBUTszNcXx9e6Jq2UXI+jzru7ZsLK8cE+ZRqj8zONHKpBBCIXWzWOnwMvWzjvw=", + "scheme": "ECDSA_RFC6979_SHA256_WALLET_CONNECT" + }, + "originSignature": { + "key": "AyDl4laT9TR1wy9jsZMT8UiEZ/E4Wn1XqTMCDmcVg3Pz", + "signature": "jIdl+vj7Wfhci7Zb2GGKl6/O4/dbtbqrNZlnvnI6+aEz4/S8omRRDZDnRQovqg7NvtPgpBuSV0qMR6CPQI4yok3q49Mif4hpIQRLSx+YOUk=", + "scheme": "ECDSA_RFC6979_SHA256_WALLET_CONNECT" + }, + "origin": { + 
"metaSignature": { + "key": "Az8wQfyTpz32ZdA1fYRyHNu9qkDQ0ngaaeL/Kc1uwp6U", + "signature": "yTVkuq/FcU5GxuJylG8OTcx+ydGPRCOiArZsgR2jiif2/Nyp1fOPZqKzqyeWdVm68RiVL7sKRFiBIFcKJ3y4jA==", + "scheme": "ECDSA_RFC6979_SHA256" + }, + "originSignature": { + "key": "Az8wQfyTpz32ZdA1fYRyHNu9qkDQ0ngaaeL/Kc1uwp6U", + "signature": "4pj2bSuVpzIWVNlQFlvLGxuXc5mt0UmtIr9lbBlzl4cFdOzjlBpZfhhHRwJEzxHKx1nJpOKXuTQgnM3tiXTupQ==", + "scheme": "ECDSA_RFC6979_SHA256" + }, + "origin": { + "bodySignature": { + "key": "ApH6OYuER9WlvAtIbnzSAY340o8cqW8eE4lmu2ROQY48", + "signature": "BBgWeAD7kOL+nz+MLbRfJmhjAOQ6NBYoN5Gbu3HRHojmOIybwKntvnB52an08iUerpIk+pPi/JjkTTj0MuojBi0=" + }, + "metaSignature": { + "key": "ApH6OYuER9WlvAtIbnzSAY340o8cqW8eE4lmu2ROQY48", + "signature": "BDmgNKKC5qziqoFXoYET+hm+VUI5dW2oALpA/OLMkz+schkhLOjN0mntZ7vXz2GWaBJUjs4sRcwCi/GXcFz8fAI=" + }, + "originSignature": { + "key": "ApH6OYuER9WlvAtIbnzSAY340o8cqW8eE4lmu2ROQY48", + "signature": "BDA9jqqQqTPwC5X6wr7PXWKMmzT27Vrq95drdXA5uEITApf2DMoM1BUMBx4flk8zFAicr/JPX/xCpHrgfjeu1C8=" + } + } + } + } +} diff --git a/crypto/testdata/container_delete_response b/crypto/testdata/container_delete_response new file mode 100644 index 000000000..9d89cef17 --- /dev/null +++ b/crypto/testdata/container_delete_response @@ -0,0 +1 @@ 
+0a001285020a0608c90110ca0110b00218af0222200a0e786865616465725f6b6579333031120e786865616465725f76616c33303122200a0e786865616465725f6b6579333032120e786865616465725f76616c3330322aab010a0608c90110ca0110cc0118cb0122200a0e786865616465725f6b6579323031120e786865616465725f76616c32303122200a0e786865616465725f6b6579323032120e786865616465725f76616c3230322a520a04086510661068186722200a0e786865616465725f6b6579313031120e786865616465725f76616c31303122200a0e786865616465725f6b6579313032120e786865616465725f76616c3130323202086a320308ce01320308b2021a820612770a2103a28aaf53c37815e14401d76ffc6117334fab464771a5d6584ed1ebacdadc03ff1250354381a12221cef30c2dbc92c201759577570b02bb9d18d6779ff3363a0292a0159b3ba9d15a6e40e5f5ddaa64ae14a7f9b226a82e2f0032f83f7ac4df84a9e2eb1f04ea0bd5dde9ebac94b3f6d3266918021a770a2103a28aaf53c37815e14401d76ffc6117334fab464771a5d6584ed1ebacdadc03ff12507b94f1dd2db55a14a0947389b396fa24050bef4d2548e4487be4733fb2b217a2d5ce024190d49163b313e14342f3549e1d2ca0d28b8f61fc38e92a47f8869f6b071f323a32657d6d45e20aa1e30022241802228d0412670a210392e9e457de69ec7407bdc8d77981adb05a0d72481af75bfb36a0a162440b20e6124014391fb733992fe74a0fc094793184cebc236adf43ce7862a6185cd556065f9c40d302155a63fdcf9578a1b3113543810536652abccec1b0c9d7185a0abcf2d118011a670a210392e9e457de69ec7407bdc8d77981adb05a0d72481af75bfb36a0a162440b20e612402b54302a0d0b1c33048b428fb9e948585d408521a4d232c7e682bb1a41a8d9dea432575748a53784f363f0f8c3ec02db1d77fcb4382d2fe731acf30a00b71eda180122b8020a660a210313f29f29e4247595ee4669b15419eb2fa9137893d55b2b6b4aeeadf7f2903f04124104b08788dd722d3cf311fabd280a286c230a2f5aaca641a7dc41534c024cd6c19dd8fda82f85bda895ceaaa80ad0cf78ae5814355b448627ded9ec8f680aca5deb12660a210313f29f29e4247595ee4669b15419eb2fa9137893d55b2b6b4aeeadf7f2903f041241044cd0df8dc289d46bc758f45f4097727baad462f174354460628bddd172787f18d62aa3d944fe580b8d54731ecb37ec607049738c6fbcdf416110e973c158d6ed1a660a210313f29f29e4247595ee4669b15419eb2fa9137893d55b2b6b4aeeadf7f2903f041241048a3f26a23a546e2093d021cebad607c9c8c12c98dd78e31c0
a6d3d92b2e7a47562ef74ccdbfe35db6723f2856a0f3bbfe5ff60fd5790f9b1e798870b8bf1683f \ No newline at end of file diff --git a/crypto/testdata/container_delete_response.json b/crypto/testdata/container_delete_response.json new file mode 100644 index 000000000..af509778e --- /dev/null +++ b/crypto/testdata/container_delete_response.json @@ -0,0 +1,104 @@ +{ + "body": {}, + "metaHeader": { + "version": { + "major": 201, + "minor": 202 + }, + "epoch": "304", + "ttl": 303, + "xHeaders": [ + { + "key": "xheader_key301", + "value": "xheader_val301" + }, + { + "key": "xheader_key302", + "value": "xheader_val302" + } + ], + "origin": { + "version": { + "major": 201, + "minor": 202 + }, + "epoch": "204", + "ttl": 203, + "xHeaders": [ + { + "key": "xheader_key201", + "value": "xheader_val201" + }, + { + "key": "xheader_key202", + "value": "xheader_val202" + } + ], + "origin": { + "version": { + "major": 101, + "minor": 102 + }, + "epoch": "104", + "ttl": 103, + "xHeaders": [ + { + "key": "xheader_key101", + "value": "xheader_val101" + }, + { + "key": "xheader_key102", + "value": "xheader_val102" + } + ], + "status": { + "code": 106 + } + }, + "status": { + "code": 206 + } + }, + "status": { + "code": 306 + } + }, + "verifyHeader": { + "metaSignature": { + "key": "A6KKr1PDeBXhRAHXb/xhFzNPq0ZHcaXWWE7R66za3AP/", + "signature": "NUOBoSIhzvMMLbySwgF1lXdXCwK7nRjWd5/zNjoCkqAVmzup0VpuQOX13apkrhSn+bImqC4vADL4P3rE34Sp4usfBOoL1d3p66yUs/bTJmk=", + "scheme": "ECDSA_RFC6979_SHA256_WALLET_CONNECT" + }, + "originSignature": { + "key": "A6KKr1PDeBXhRAHXb/xhFzNPq0ZHcaXWWE7R66za3AP/", + "signature": "e5Tx3S21WhSglHOJs5b6JAUL700lSORIe+RzP7KyF6LVzgJBkNSRY7MT4UNC81SeHSyg0ouPYfw46SpH+IafawcfMjoyZX1tReIKoeMAIiQ=", + "scheme": "ECDSA_RFC6979_SHA256_WALLET_CONNECT" + }, + "origin": { + "metaSignature": { + "key": "A5Lp5Ffeaex0B73I13mBrbBaDXJIGvdb+zagoWJECyDm", + "signature": "FDkftzOZL+dKD8CUeTGEzrwjat9Dznhiphhc1VYGX5xA0wIVWmP9z5V4obMRNUOBBTZlKrzOwbDJ1xhaCrzy0Q==", + "scheme": "ECDSA_RFC6979_SHA256" + }, + 
"originSignature": { + "key": "A5Lp5Ffeaex0B73I13mBrbBaDXJIGvdb+zagoWJECyDm", + "signature": "K1QwKg0LHDMEi0KPuelIWF1AhSGk0jLH5oK7GkGo2d6kMldXSKU3hPNj8PjD7ALbHXf8tDgtL+cxrPMKALce2g==", + "scheme": "ECDSA_RFC6979_SHA256" + }, + "origin": { + "bodySignature": { + "key": "AxPynynkJHWV7kZpsVQZ6y+pE3iT1Vsra0rurffykD8E", + "signature": "BLCHiN1yLTzzEfq9KAoobCMKL1qspkGn3EFTTAJM1sGd2P2oL4W9qJXOqqgK0M94rlgUNVtEhife2eyPaArKXes=" + }, + "metaSignature": { + "key": "AxPynynkJHWV7kZpsVQZ6y+pE3iT1Vsra0rurffykD8E", + "signature": "BEzQ343CidRrx1j0X0CXcnuq1GLxdDVEYGKL3dFyeH8Y1iqj2UT+WAuNVHMeyzfsYHBJc4xvvN9BYRDpc8FY1u0=" + }, + "originSignature": { + "key": "AxPynynkJHWV7kZpsVQZ6y+pE3iT1Vsra0rurffykD8E", + "signature": "BIo/JqI6VG4gk9AhzrrWB8nIwSyY3XjjHAptPZKy56R1Yu90zNv+NdtnI/KFag87v+X/YP1XkPmx55iHC4vxaD8=" + } + } + } + } +} diff --git a/crypto/testdata/container_get_request b/crypto/testdata/container_get_request new file mode 100644 index 000000000..1c426f89a --- /dev/null +++ b/crypto/testdata/container_get_request @@ -0,0 +1 @@ 
+0a110a0f0a0d616e795f636f6e7461696e657212ff010a0608c90110ca0110b00218af0222200a0e786865616465725f6b6579333031120e786865616465725f76616c33303122200a0e786865616465725f6b6579333032120e786865616465725f76616c3330323aa7010a0608c90110ca0110cc0118cb0122200a0e786865616465725f6b6579323031120e786865616465725f76616c32303122200a0e786865616465725f6b6579323032120e786865616465725f76616c3230323a500a04086510661068186722200a0e786865616465725f6b6579313031120e786865616465725f76616c31303122200a0e786865616465725f6b6579313032120e786865616465725f76616c313032400540cd0140b1021a820612770a2103e941db649578ef4df430b629b2d6091687fefcc591b17751f4eb85e6b664a00712507bb90a4413daa29b4878e40158260f52d95f2f3430f4bcb53bbec40eeb1942f4f7bffb7f14dca889de5a93a6c55dfcca2470e4f1a1e10a85c683d3ffb480cc81edaa8150f8fcaa15d68df564d803989918021a770a2103e941db649578ef4df430b629b2d6091687fefcc591b17751f4eb85e6b664a0071250772ecadb7077d8f1c4ee76501ec1db3aa357066647d998bafcabbd6e97dffb8dd998d3d0f57e2af9b5b0a4becba0983e5cfef6c20f1c3f2e4a89242b9aaa4c2e562f203dbefee03e9056e7f02a236d6a1802228d0412670a210300752317890fd277093a38784f4a9574e4776221cd6307377a235eb179d40e961240bc333edb3c28ac4b8be83d78e4102eb4ff4f4f19a2a2461977558a173a3da92255bb2d49ae59028f550e3b7c23b4b6f1da0604e3f51b9fff4e97d98706ddcbbc18011a670a210300752317890fd277093a38784f4a9574e4776221cd6307377a235eb179d40e9612406537c4e03df2102c66105400d5b43ea602cbc8688ffccd988903e1d3a754a173f47562032d7ccc527a2b3f919cab16b6e9de79edc8a86ac9d4d29abf9b3c471a180122b8020a660a210395307dfab5efa6e5b4bcec2aaa2c166f2a80fa579161825620334b7ca5d7440c124104528fa129dc02833d0ec543d17ffc56b5fb93ada6250f417defe7e76103368aab6c6c6a10cc13ffbcf0975f0c7aba6dc38b603333ae5dc81071d37be207fdfb5412660a210395307dfab5efa6e5b4bcec2aaa2c166f2a80fa579161825620334b7ca5d7440c124104229b2877aee20aca6ce15d4bff2dd9b5ac6531f0061f3f46e5ec880baae7de49da320a544e9dff974c6eaaf8ffddfa47bed5d84ba88d38e9e37d00b4668bb5851a660a210395307dfab5efa6e5b4bcec2aaa2c166f2a80fa579161825620334b7ca5d7440c124104886ab600c56bf8ad0cc7c47774b
82491697aa89d1940dcf08908e22361e480ada1f93942071c40f214e8b7d1fb357ac9b99ebf0db25e5bdb4375da393a2e8aec \ No newline at end of file diff --git a/crypto/testdata/container_get_request.json b/crypto/testdata/container_get_request.json new file mode 100644 index 000000000..8598f0fbd --- /dev/null +++ b/crypto/testdata/container_get_request.json @@ -0,0 +1,105 @@ +{ + "body": { + "containerId": { + "value": "YW55X2NvbnRhaW5lcg==" + } + }, + "metaHeader": { + "version": { + "major": 201, + "minor": 202 + }, + "epoch": "304", + "ttl": 303, + "xHeaders": [ + { + "key": "xheader_key301", + "value": "xheader_val301" + }, + { + "key": "xheader_key302", + "value": "xheader_val302" + } + ], + "origin": { + "version": { + "major": 201, + "minor": 202 + }, + "epoch": "204", + "ttl": 203, + "xHeaders": [ + { + "key": "xheader_key201", + "value": "xheader_val201" + }, + { + "key": "xheader_key202", + "value": "xheader_val202" + } + ], + "origin": { + "version": { + "major": 101, + "minor": 102 + }, + "epoch": "104", + "ttl": 103, + "xHeaders": [ + { + "key": "xheader_key101", + "value": "xheader_val101" + }, + { + "key": "xheader_key102", + "value": "xheader_val102" + } + ], + "magicNumber": "5" + }, + "magicNumber": "205" + }, + "magicNumber": "305" + }, + "verifyHeader": { + "metaSignature": { + "key": "A+lB22SVeO9N9DC2KbLWCRaH/vzFkbF3UfTrhea2ZKAH", + "signature": "e7kKRBPaoptIeOQBWCYPUtlfLzQw9Ly1O77EDusZQvT3v/t/FNyoid5ak6bFXfzKJHDk8aHhCoXGg9P/tIDMge2qgVD4/KoV1o31ZNgDmJk=", + "scheme": "ECDSA_RFC6979_SHA256_WALLET_CONNECT" + }, + "originSignature": { + "key": "A+lB22SVeO9N9DC2KbLWCRaH/vzFkbF3UfTrhea2ZKAH", + "signature": "dy7K23B32PHE7nZQHsHbOqNXBmZH2Zi6/Ku9bpff+43ZmNPQ9X4q+bWwpL7LoJg+XP72wg8cPy5KiSQrmqpMLlYvID2+/uA+kFbn8CojbWo=", + "scheme": "ECDSA_RFC6979_SHA256_WALLET_CONNECT" + }, + "origin": { + "metaSignature": { + "key": "AwB1IxeJD9J3CTo4eE9KlXTkd2IhzWMHN3ojXrF51A6W", + "signature": "vDM+2zworEuL6D145BAutP9PTxmiokYZd1WKFzo9qSJVuy1JrlkCj1UOO3wjtLbx2gYE4/Ubn/9Ol9mHBt3LvA==", 
+ "scheme": "ECDSA_RFC6979_SHA256" + }, + "originSignature": { + "key": "AwB1IxeJD9J3CTo4eE9KlXTkd2IhzWMHN3ojXrF51A6W", + "signature": "ZTfE4D3yECxmEFQA1bQ+pgLLyGiP/M2YiQPh06dUoXP0dWIDLXzMUnorP5Gcqxa26d557cioasnU0pq/mzxHGg==", + "scheme": "ECDSA_RFC6979_SHA256" + }, + "origin": { + "bodySignature": { + "key": "A5Uwffq176bltLzsKqosFm8qgPpXkWGCViAzS3yl10QM", + "signature": "BFKPoSncAoM9DsVD0X/8VrX7k62mJQ9Bfe/n52EDNoqrbGxqEMwT/7zwl18Merptw4tgMzOuXcgQcdN74gf9+1Q=", + "scheme": "ECDSA_SHA512" + }, + "metaSignature": { + "key": "A5Uwffq176bltLzsKqosFm8qgPpXkWGCViAzS3yl10QM", + "signature": "BCKbKHeu4grKbOFdS/8t2bWsZTHwBh8/RuXsiAuq595J2jIKVE6d/5dMbqr4/936R77V2EuojTjp430AtGaLtYU=", + "scheme": "ECDSA_SHA512" + }, + "originSignature": { + "key": "A5Uwffq176bltLzsKqosFm8qgPpXkWGCViAzS3yl10QM", + "signature": "BIhqtgDFa/itDMfEd3S4JJFpeqidGUDc8IkI4iNh5ICtofk5QgccQPIU6LfR+zV6ybmevw2yXlvbQ3XaOTouiuw=", + "scheme": "ECDSA_SHA512" + } + } + } + } +} diff --git a/crypto/testdata/container_get_response b/crypto/testdata/container_get_response new file mode 100644 index 000000000..1088fcc4a --- /dev/null +++ b/crypto/testdata/container_get_response @@ -0,0 +1 @@ 
+0ac7010a570a0408011002120a0a08616e795f757365721a09616e795f6e6f6e636520032a160a09617474725f6b6579311209617474725f76616c312a160a09617474725f6b6579321209617474725f76616c3232060a0208041005121f0a0e616e795f7075626c69635f6b6579120d616e795f7369676e61747572651a4b0a360a09616e795f746f6b656e120a0a08616e795f757365721a06080310021801220f616e795f73657373696f6e5f6b657932040802100112110a0d616e795f7369676e617475726518011285020a0608c90110ca0110b00218af0222200a0e786865616465725f6b6579333031120e786865616465725f76616c33303122200a0e786865616465725f6b6579333032120e786865616465725f76616c3330322aab010a0608c90110ca0110cc0118cb0122200a0e786865616465725f6b6579323031120e786865616465725f76616c32303122200a0e786865616465725f6b6579323032120e786865616465725f76616c3230322a520a04086510661068186722200a0e786865616465725f6b6579313031120e786865616465725f76616c31303122200a0e786865616465725f6b6579313032120e786865616465725f76616c3130323202086a320308ce01320308b2021a820612770a210347e61f7befaa29e2946f8258fd1b0594172a5bb5f018531225e01150110dfde212508cd23e926cc95b03904d7d9ef616c9ea63acf5c33e1a35f3697f7555bea75b09593307827b9a8fafdd7c682d58e51f1120d7ea0a0969c66c1e29942f6e84104b2b171091686a9f25dfc1b6843697a5f918021a770a210347e61f7befaa29e2946f8258fd1b0594172a5bb5f018531225e01150110dfde2125068c8f55582ed44ef5903cff7eb087ecb322787cc1bea3f06de6c17d0986f1ebfb0a1c52d24c9d99eea7b0a11185abb056fde633ba277d8ad2e991a7c7fd17d980b8b15b08c712c0afb86be6bd3fc76851802228d0412670a2102ba46a641eea026deed3ebf826b9da36beef49198bee6c90b671b403361d4c5c1124084b540a86f07ff0741f13603f02be764480322474fc32776588013b1e0cceaecb2dc73514a23d50280942df4e94c7c13d16e7ff1f15c0d0865e7d1099a20295418011a670a2102ba46a641eea026deed3ebf826b9da36beef49198bee6c90b671b403361d4c5c1124037fe07c4b6019e64c0f65a03ab1596cb6d1ed2b151ccb8aa6fda4395426e8fb9ad7706378bff11d986e4fe39b245041ef374e35f36c846a165bc3aa7929ac26f180122b8020a660a2103eea345b5ab9b84e208996e097221fa0c02d771cae877463fd13c04da4686a97a1241048302ad6712ca68c7766b7cb0175d6a1a1fc7052b890c6ff8cc7693dc3c8cfdd96
3970aae0e7e370234edfaae2d5b4d41d2087c157a4cf00270f4d8fa92b2a0b112660a2103eea345b5ab9b84e208996e097221fa0c02d771cae877463fd13c04da4686a97a1241047af1b1cf600fec4d7ce2be597ed6c0f44e5ad16818e0410ef1bdacb3df8c4a9851e1a09948628ada51f98d0f8ef9bc2736a20c6bbfbdefea19ae6c5efd49f4f41a660a2103eea345b5ab9b84e208996e097221fa0c02d771cae877463fd13c04da4686a97a124104f7471aa5ab60b92bb17944477c302eeb26b0eee53ecd347b8c7cca4dcd8d30ee962d541f6dd6a69fae63407fccbc4c6446facf9eb273ca39102979db04f917df \ No newline at end of file diff --git a/crypto/testdata/container_get_response.json b/crypto/testdata/container_get_response.json new file mode 100644 index 000000000..aecaa4be4 --- /dev/null +++ b/crypto/testdata/container_get_response.json @@ -0,0 +1,163 @@ +{ + "body": { + "container": { + "version": { + "major": 1, + "minor": 2 + }, + "ownerID": { + "value": "YW55X3VzZXI=" + }, + "nonce": "YW55X25vbmNl", + "basicACL": 3, + "attributes": [ + { + "key": "attr_key1", + "value": "attr_val1" + }, + { + "key": "attr_key2", + "value": "attr_val2" + } + ], + "placementPolicy": { + "replicas": [ + { + "count": 4 + } + ], + "containerBackupFactor": 5 + } + }, + "signature": { + "key": "YW55X3B1YmxpY19rZXk=", + "signature": "YW55X3NpZ25hdHVyZQ==" + }, + "sessionToken": { + "body": { + "id": "YW55X3Rva2Vu", + "ownerID": { + "value": "YW55X3VzZXI=" + }, + "lifetime": { + "exp": "3", + "nbf": "2", + "iat": "1" + }, + "sessionKey": "YW55X3Nlc3Npb25fa2V5", + "container": { + "verb": "DELETE", + "wildcard": true + } + }, + "signature": { + "key": "YW55X3NpZ25hdHVyZQ==", + "scheme": "ECDSA_RFC6979_SHA256" + } + } + }, + "metaHeader": { + "version": { + "major": 201, + "minor": 202 + }, + "epoch": "304", + "ttl": 303, + "xHeaders": [ + { + "key": "xheader_key301", + "value": "xheader_val301" + }, + { + "key": "xheader_key302", + "value": "xheader_val302" + } + ], + "origin": { + "version": { + "major": 201, + "minor": 202 + }, + "epoch": "204", + "ttl": 203, + "xHeaders": [ + { + "key": "xheader_key201", + 
"value": "xheader_val201" + }, + { + "key": "xheader_key202", + "value": "xheader_val202" + } + ], + "origin": { + "version": { + "major": 101, + "minor": 102 + }, + "epoch": "104", + "ttl": 103, + "xHeaders": [ + { + "key": "xheader_key101", + "value": "xheader_val101" + }, + { + "key": "xheader_key102", + "value": "xheader_val102" + } + ], + "status": { + "code": 106 + } + }, + "status": { + "code": 206 + } + }, + "status": { + "code": 306 + } + }, + "verifyHeader": { + "metaSignature": { + "key": "A0fmH3vvqinilG+CWP0bBZQXKlu18BhTEiXgEVARDf3i", + "signature": "jNI+kmzJWwOQTX2e9hbJ6mOs9cM+GjXzaX91Vb6nWwlZMweCe5qPr918aC1Y5R8RINfqCglpxmweKZQvboQQSysXEJFoap8l38G2hDaXpfk=", + "scheme": "ECDSA_RFC6979_SHA256_WALLET_CONNECT" + }, + "originSignature": { + "key": "A0fmH3vvqinilG+CWP0bBZQXKlu18BhTEiXgEVARDf3i", + "signature": "aMj1VYLtRO9ZA8/36wh+yzInh8wb6j8G3mwX0JhvHr+wocUtJMnZnup7ChEYWrsFb95jO6J32K0umRp8f9F9mAuLFbCMcSwK+4a+a9P8doU=", + "scheme": "ECDSA_RFC6979_SHA256_WALLET_CONNECT" + }, + "origin": { + "metaSignature": { + "key": "ArpGpkHuoCbe7T6/gmudo2vu9JGYvubJC2cbQDNh1MXB", + "signature": "hLVAqG8H/wdB8TYD8CvnZEgDIkdPwyd2WIATseDM6uyy3HNRSiPVAoCULfTpTHwT0W5/8fFcDQhl59EJmiApVA==", + "scheme": "ECDSA_RFC6979_SHA256" + }, + "originSignature": { + "key": "ArpGpkHuoCbe7T6/gmudo2vu9JGYvubJC2cbQDNh1MXB", + "signature": "N/4HxLYBnmTA9loDqxWWy20e0rFRzLiqb9pDlUJuj7mtdwY3i/8R2Ybk/jmyRQQe83TjXzbIRqFlvDqnkprCbw==", + "scheme": "ECDSA_RFC6979_SHA256" + }, + "origin": { + "bodySignature": { + "key": "A+6jRbWrm4TiCJluCXIh+gwC13HK6HdGP9E8BNpGhql6", + "signature": "BIMCrWcSymjHdmt8sBddahofxwUriQxv+Mx2k9w8jP3ZY5cKrg5+NwI07fquLVtNQdIIfBV6TPACcPTY+pKyoLE=", + "scheme": "ECDSA_SHA512" + }, + "metaSignature": { + "key": "A+6jRbWrm4TiCJluCXIh+gwC13HK6HdGP9E8BNpGhql6", + "signature": "BHrxsc9gD+xNfOK+WX7WwPROWtFoGOBBDvG9rLPfjEqYUeGgmUhiitpR+Y0Pjvm8JzaiDGu/ve/qGa5sXv1J9PQ=", + "scheme": "ECDSA_SHA512" + }, + "originSignature": { + "key": "A+6jRbWrm4TiCJluCXIh+gwC13HK6HdGP9E8BNpGhql6", + 
"signature": "BPdHGqWrYLkrsXlER3wwLusmsO7lPs00e4x8yk3NjTDuli1UH23Wpp+uY0B/zLxMZEb6z56yc8o5ECl52wT5F98=", + "scheme": "ECDSA_SHA512" + } + } + } + } +} diff --git a/crypto/testdata/container_list_request b/crypto/testdata/container_list_request new file mode 100644 index 000000000..190dc7793 --- /dev/null +++ b/crypto/testdata/container_list_request @@ -0,0 +1 @@ +0a0c0a0a0a08616e795f7573657212ff010a0608c90110ca0110b00218af0222200a0e786865616465725f6b6579333031120e786865616465725f76616c33303122200a0e786865616465725f6b6579333032120e786865616465725f76616c3330323aa7010a0608c90110ca0110cc0118cb0122200a0e786865616465725f6b6579323031120e786865616465725f76616c32303122200a0e786865616465725f6b6579323032120e786865616465725f76616c3230323a500a04086510661068186722200a0e786865616465725f6b6579313031120e786865616465725f76616c31303122200a0e786865616465725f6b6579313032120e786865616465725f76616c313032400540cd0140b1021a820612770a2103c420d89f7325239189821b60afd8ab4ab8e45214b202c05ee49e398238360f1612501d9ad4c7e5e9f624630db81ab26afa330bfae69d65c9c7e3da796eb3d026361e4a09d9b83451e53eff279d7105c7b04034a4d8766d0f6d8cd964936565c9252ffa8ccd8d1ac3a304bef22eb6515ce53518021a770a2103c420d89f7325239189821b60afd8ab4ab8e45214b202c05ee49e398238360f1612507e4dee13f5808ea031e17a1938cdb8992f68e0282636c8394b04a40f85f2ef67384558b1af9a0b2f918caf5877dfc567b1ee40c24901f31bd8d8e1411e1bd685014f499d6ca4ed5bbbfa04c6c8da18f41802228d0412670a210374d42c725011c485560c9e7e31435d1f0cd2d23db88a2da14871f42b6de89a6f12400b0238f6c5cf08086fdaf909eb1b52c99cd4b3b9f05fcd9da911b08073747aa9530ac42d6975e15071ad85b37abecc919a84077211301712c8332b64894f9e0918011a670a210374d42c725011c485560c9e7e31435d1f0cd2d23db88a2da14871f42b6de89a6f124078eae8d07060dd06f1ed054950fed38f16ec29ec0ed3ddc9982cef690348d58172ae0874360c7e998c7738d0912c67a1d90419200487704f0546f1491a3c3e93180122b8020a660a2102f686952ddf9d3f05f29bd42c5b0a521f89fc32ffd44a91efe16e6a803829d851124104fcf6c3a0fcd6db22778633c21e76898c7991259abe1d4884eaeb78a760944d320af23e0f68f8315f47e63513
aac97dc32a83a6834d8436b7aa77b35cbc8f395912660a2102f686952ddf9d3f05f29bd42c5b0a521f89fc32ffd44a91efe16e6a803829d851124104c3b0eb9eea365a9f1f052d674d69d403cf79354c96e63469b2a52ff43802eca36635903eade660492fea6820de4836cefde92d15120a5b44efacebaae5437b401a660a2102f686952ddf9d3f05f29bd42c5b0a521f89fc32ffd44a91efe16e6a803829d85112410489d53accd6109009734b1f8684b30254c9418635843a823f4808f386cb0e169a8d7e1c4d23d9b23289826bafacb498aa76a4bc52f1860288ed951cf732f42672 \ No newline at end of file diff --git a/crypto/testdata/container_list_request.json b/crypto/testdata/container_list_request.json new file mode 100644 index 000000000..f0b89e901 --- /dev/null +++ b/crypto/testdata/container_list_request.json @@ -0,0 +1,102 @@ +{ + "body": { + "ownerId": { + "value": "YW55X3VzZXI=" + } + }, + "metaHeader": { + "version": { + "major": 201, + "minor": 202 + }, + "epoch": "304", + "ttl": 303, + "xHeaders": [ + { + "key": "xheader_key301", + "value": "xheader_val301" + }, + { + "key": "xheader_key302", + "value": "xheader_val302" + } + ], + "origin": { + "version": { + "major": 201, + "minor": 202 + }, + "epoch": "204", + "ttl": 203, + "xHeaders": [ + { + "key": "xheader_key201", + "value": "xheader_val201" + }, + { + "key": "xheader_key202", + "value": "xheader_val202" + } + ], + "origin": { + "version": { + "major": 101, + "minor": 102 + }, + "epoch": "104", + "ttl": 103, + "xHeaders": [ + { + "key": "xheader_key101", + "value": "xheader_val101" + }, + { + "key": "xheader_key102", + "value": "xheader_val102" + } + ], + "magicNumber": "5" + }, + "magicNumber": "205" + }, + "magicNumber": "305" + }, + "verifyHeader": { + "metaSignature": { + "key": "A8Qg2J9zJSORiYIbYK/Yq0q45FIUsgLAXuSeOYI4Ng8W", + "signature": "HZrUx+Xp9iRjDbgasmr6Mwv65p1lycfj2nlus9AmNh5KCdm4NFHlPv8nnXEFx7BANKTYdm0PbYzZZJNlZcklL/qMzY0aw6MEvvIutlFc5TU=", + "scheme": "ECDSA_RFC6979_SHA256_WALLET_CONNECT" + }, + "originSignature": { + "key": "A8Qg2J9zJSORiYIbYK/Yq0q45FIUsgLAXuSeOYI4Ng8W", + "signature": 
"fk3uE/WAjqAx4XoZOM24mS9o4CgmNsg5SwSkD4Xy72c4RVixr5oLL5GMr1h338Vnse5AwkkB8xvY2OFBHhvWhQFPSZ1spO1bu/oExsjaGPQ=", + "scheme": "ECDSA_RFC6979_SHA256_WALLET_CONNECT" + }, + "origin": { + "metaSignature": { + "key": "A3TULHJQEcSFVgyefjFDXR8M0tI9uIotoUhx9Ctt6Jpv", + "signature": "CwI49sXPCAhv2vkJ6xtSyZzUs7nwX82dqRGwgHN0eqlTCsQtaXXhUHGthbN6vsyRmoQHchEwFxLIMytkiU+eCQ==", + "scheme": "ECDSA_RFC6979_SHA256" + }, + "originSignature": { + "key": "A3TULHJQEcSFVgyefjFDXR8M0tI9uIotoUhx9Ctt6Jpv", + "signature": "eOro0HBg3Qbx7QVJUP7TjxbsKewO093JmCzvaQNI1YFyrgh0Ngx+mYx3ONCRLGeh2QQZIASHcE8FRvFJGjw+kw==", + "scheme": "ECDSA_RFC6979_SHA256" + }, + "origin": { + "bodySignature": { + "key": "AvaGlS3fnT8F8pvULFsKUh+J/DL/1EqR7+FuaoA4KdhR", + "signature": "BPz2w6D81tsid4Yzwh52iYx5kSWavh1IhOrreKdglE0yCvI+D2j4MV9H5jUTqsl9wyqDpoNNhDa3qnezXLyPOVk=" + }, + "metaSignature": { + "key": "AvaGlS3fnT8F8pvULFsKUh+J/DL/1EqR7+FuaoA4KdhR", + "signature": "BMOw657qNlqfHwUtZ01p1APPeTVMluY0abKlL/Q4AuyjZjWQPq3mYEkv6mgg3kg2zv3pLRUSCltE76zrquVDe0A=" + }, + "originSignature": { + "key": "AvaGlS3fnT8F8pvULFsKUh+J/DL/1EqR7+FuaoA4KdhR", + "signature": "BInVOszWEJAJc0sfhoSzAlTJQYY1hDqCP0gI84bLDhaajX4cTSPZsjKJgmuvrLSYqnakvFLxhgKI7ZUc9zL0JnI=" + } + } + } + } +} diff --git a/crypto/testdata/container_list_response b/crypto/testdata/container_list_response new file mode 100644 index 000000000..10f4043be --- /dev/null +++ b/crypto/testdata/container_list_response @@ -0,0 +1 @@ 
+0a240a100a0e616e795f636f6e7461696e6572310a100a0e616e795f636f6e7461696e6572321285020a0608c90110ca0110b00218af0222200a0e786865616465725f6b6579333031120e786865616465725f76616c33303122200a0e786865616465725f6b6579333032120e786865616465725f76616c3330322aab010a0608c90110ca0110cc0118cb0122200a0e786865616465725f6b6579323031120e786865616465725f76616c32303122200a0e786865616465725f6b6579323032120e786865616465725f76616c3230322a520a04086510661068186722200a0e786865616465725f6b6579313031120e786865616465725f76616c31303122200a0e786865616465725f6b6579313032120e786865616465725f76616c3130323202086a320308ce01320308b2021a820612770a2102b571f51f3d32899c0abe04d71d0ce9d0a1d08b8730d858b345b1062d3ad7028e12502c601646b1ff9da95568540bb5c3079c8687bac21c60fa9a872e17369579b0f546826d7bb5993b7a64f438997446b7107c10d845bc44b1a4bdc49fcdcd86ef4093dc2d2805596831ab8ce3e83a0c8ccb18021a770a2102b571f51f3d32899c0abe04d71d0ce9d0a1d08b8730d858b345b1062d3ad7028e1250c72a0453c9fec7a3a971b2ae3bf5881cdca4b304ccf1d371c21d7bbe5639d3dbbb4f200549eb04d9caf832b4844929c6e88e187acf1a57df41cb754ae12ff1a62adad5cf35d28725982cc6d3520870fc1802228d0412670a210313a6a1e5cd92d204e2ec553e08077d2a565bf6d5e5e4c840b61d6aa5f93725de12400eb2612fe349a7d5c3d1bfe10c832107ca7c0245b5d96cc7fd53b813362032247a596c3842551a538952cdf48ce2cf27380a7dab689e3d8d4f184135621a9e3818011a670a210313a6a1e5cd92d204e2ec553e08077d2a565bf6d5e5e4c840b61d6aa5f93725de1240b1ac2bd53b80e6639381d11a27366dbde4e836c4086e14c00de5d4fdce7e09312894102b8e61c912218ddf8c6fe7ebd921e127091f05a7e02f0960bab7507265180122b8020a660a2102c0d65517cb51b16d67183e6590cf20baaa45a14a8177f52ce8776fa45f18fba0124104a83146819265d9661cff0862b1a9bc2871910b24adf7df4211c5c5d1dc8f170233f1cffef8c4dd1440e7bcd9c1c0bfdaeb3a2ecccff63fdac48ed0c7730822eb12660a2102c0d65517cb51b16d67183e6590cf20baaa45a14a8177f52ce8776fa45f18fba012410466a9060251ff155eb16060e295be2304f981f49f2f7d66b49096662086c0a31724a7155dce21f763c6abcda38d99d8c9557aced3503abaf6c213edf8c94be6db1a660a2102c0d65517cb51b16d67183e6590cf20baaa45a14a8177f52
ce8776fa45f18fba0124104ab0d6e479628e899b943ac7556f95780fd29457b41e40700d0600012e14117d6a0bcfd1fb55e9dc805eba128819d05c60b2f5aaf371a5fd58bc578a0896244ee \ No newline at end of file diff --git a/crypto/testdata/container_list_response.json b/crypto/testdata/container_list_response.json new file mode 100644 index 000000000..ab907af0e --- /dev/null +++ b/crypto/testdata/container_list_response.json @@ -0,0 +1,113 @@ +{ + "body": { + "containerIds": [ + { + "value": "YW55X2NvbnRhaW5lcjE=" + }, + { + "value": "YW55X2NvbnRhaW5lcjI=" + } + ] + }, + "metaHeader": { + "version": { + "major": 201, + "minor": 202 + }, + "epoch": "304", + "ttl": 303, + "xHeaders": [ + { + "key": "xheader_key301", + "value": "xheader_val301" + }, + { + "key": "xheader_key302", + "value": "xheader_val302" + } + ], + "origin": { + "version": { + "major": 201, + "minor": 202 + }, + "epoch": "204", + "ttl": 203, + "xHeaders": [ + { + "key": "xheader_key201", + "value": "xheader_val201" + }, + { + "key": "xheader_key202", + "value": "xheader_val202" + } + ], + "origin": { + "version": { + "major": 101, + "minor": 102 + }, + "epoch": "104", + "ttl": 103, + "xHeaders": [ + { + "key": "xheader_key101", + "value": "xheader_val101" + }, + { + "key": "xheader_key102", + "value": "xheader_val102" + } + ], + "status": { + "code": 106 + } + }, + "status": { + "code": 206 + } + }, + "status": { + "code": 306 + } + }, + "verifyHeader": { + "metaSignature": { + "key": "ArVx9R89MomcCr4E1x0M6dCh0IuHMNhYs0WxBi061wKO", + "signature": "LGAWRrH/nalVaFQLtcMHnIaHusIcYPqahy4XNpV5sPVGgm17tZk7emT0OJl0RrcQfBDYRbxEsaS9xJ/NzYbvQJPcLSgFWWgxq4zj6DoMjMs=", + "scheme": "ECDSA_RFC6979_SHA256_WALLET_CONNECT" + }, + "originSignature": { + "key": "ArVx9R89MomcCr4E1x0M6dCh0IuHMNhYs0WxBi061wKO", + "signature": "xyoEU8n+x6OpcbKuO/WIHNykswTM8dNxwh17vlY509u7TyAFSesE2cr4MrSESSnG6I4Yes8aV99By3VK4S/xpira1c810oclmCzG01IIcPw=", + "scheme": "ECDSA_RFC6979_SHA256_WALLET_CONNECT" + }, + "origin": { + "metaSignature": { + "key": 
"AxOmoeXNktIE4uxVPggHfSpWW/bV5eTIQLYdaqX5NyXe", + "signature": "DrJhL+NJp9XD0b/hDIMhB8p8AkW12WzH/VO4EzYgMiR6WWw4QlUaU4lSzfSM4s8nOAp9q2iePY1PGEE1YhqeOA==", + "scheme": "ECDSA_RFC6979_SHA256" + }, + "originSignature": { + "key": "AxOmoeXNktIE4uxVPggHfSpWW/bV5eTIQLYdaqX5NyXe", + "signature": "sawr1TuA5mOTgdEaJzZtveToNsQIbhTADeXU/c5+CTEolBArjmHJEiGN34xv5+vZIeEnCR8Fp+AvCWC6t1ByZQ==", + "scheme": "ECDSA_RFC6979_SHA256" + }, + "origin": { + "bodySignature": { + "key": "AsDWVRfLUbFtZxg+ZZDPILqqRaFKgXf1LOh3b6RfGPug", + "signature": "BKgxRoGSZdlmHP8IYrGpvChxkQskrfffQhHFxdHcjxcCM/HP/vjE3RRA57zZwcC/2us6LszP9j/axI7Qx3MIIus=" + }, + "metaSignature": { + "key": "AsDWVRfLUbFtZxg+ZZDPILqqRaFKgXf1LOh3b6RfGPug", + "signature": "BGapBgJR/xVesWBg4pW+IwT5gfSfL31mtJCWZiCGwKMXJKcVXc4h92PGq82jjZnYyVV6ztNQOrr2whPt+MlL5ts=" + }, + "originSignature": { + "key": "AsDWVRfLUbFtZxg+ZZDPILqqRaFKgXf1LOh3b6RfGPug", + "signature": "BKsNbkeWKOiZuUOsdVb5V4D9KUV7QeQHANBgABLhQRfWoLz9H7VencgF66EogZ0FxgsvWq83Gl/Vi8V4oIliRO4=" + } + } + } + } +} diff --git a/crypto/testdata/container_put_request b/crypto/testdata/container_put_request new file mode 100644 index 000000000..cf7c40737 --- /dev/null +++ b/crypto/testdata/container_put_request @@ -0,0 +1 @@ 
+0a7a0a570a0408011002120a0a08616e795f757365721a09616e795f6e6f6e636520032a160a09617474725f6b6579311209617474725f76616c312a160a09617474725f6b6579321209617474725f76616c3232060a0208041005121f0a0e616e795f7075626c69635f6b6579120d616e795f7369676e617475726512ff010a0608c90110ca0110b00218af0222200a0e786865616465725f6b6579333031120e786865616465725f76616c33303122200a0e786865616465725f6b6579333032120e786865616465725f76616c3330323aa7010a0608c90110ca0110cc0118cb0122200a0e786865616465725f6b6579323031120e786865616465725f76616c32303122200a0e786865616465725f6b6579323032120e786865616465725f76616c3230323a500a04086510661068186722200a0e786865616465725f6b6579313031120e786865616465725f76616c31303122200a0e786865616465725f6b6579313032120e786865616465725f76616c313032406940cd0140b1021a820612770a2103ddabfc510d599fdcb3c21318cb2d680d99fa44c27ed2a1a8ee310e6193fc1e5f1250a9e87d1470818eeed2ddefa0e1cd01c0b7f6d1e0301dca3c7f2de6c07e26dcb236ebbe5e131eceda54f3936ea6db364ce59d464bb83674f25c26ac491591659279c956eeb4c15875fbca7cd69779183018021a770a2103ddabfc510d599fdcb3c21318cb2d680d99fa44c27ed2a1a8ee310e6193fc1e5f1250a9dcdc33b992706efff69dab9165d675db3a6f491b49ab5f29d87dc3ace5903bd97fd1dc213ea4c1ee30ac02ffca1dbc49047ac84b9448c83e5ab7425206ec364ffdc095ef625a470abaf058ef79cdda1802228d0412670a21025a7f21f36f86933c8d1bc7d3699f93eecde7ad6696f49d418cc66fd413d16b1c124095a28864d922ab91e8596904d2cae02b60d336c135b9a494b8d293c872370529d115d75e29d00c9795605874274a1e1f5d2353a239cc9f20a5c6cab0c9987a5118011a670a21025a7f21f36f86933c8d1bc7d3699f93eecde7ad6696f49d418cc66fd413d16b1c124069df0fd9f22d50181a2a1ba12ba90ac8c55c1874c88b3ee81fcc37cfc31e0310a8ccd015a79b0f28078ea2de454614e28676ef91d48db9868839e7940513417a180122b8020a660a21021b30214fb6dd011fe7d24699f881f3dde86356aded366c268af280c78a9441d0124104ec203c99e715eadae3ecdef6431d74ec88e93f37120b94b0adb57602cf19662e64212434b48a006787dd13beff6a56f767dd5482fb33424b67e3ca96bb26fe8b12660a21021b30214fb6dd011fe7d24699f881f3dde86356aded366c268af280c78a9441d0124104719bcf576a6aabcddd75aa17c
b5c50f299cdf713b03eb4e4597cf90472966b9f6717eca14936bb3e500ef0f02464babada5d0b6eaa1b2a19f8bf05049192b6be1a660a21021b30214fb6dd011fe7d24699f881f3dde86356aded366c268af280c78a9441d0124104288e9800ef86f69c2342020cfa5e59be040b81044b6fdf29bb7c35056152e9bad6487cfa97643d43fe24a12a70bae0d3bc68ddfb2ad732b9a0028c4e3a4f8d35 \ No newline at end of file diff --git a/crypto/testdata/container_put_request.json b/crypto/testdata/container_put_request.json new file mode 100644 index 000000000..cec45cd10 --- /dev/null +++ b/crypto/testdata/container_put_request.json @@ -0,0 +1,135 @@ +{ + "body": { + "container": { + "version": { + "major": 1, + "minor": 2 + }, + "ownerID": { + "value": "YW55X3VzZXI=" + }, + "nonce": "YW55X25vbmNl", + "basicACL": 3, + "attributes": [ + { + "key": "attr_key1", + "value": "attr_val1" + }, + { + "key": "attr_key2", + "value": "attr_val2" + } + ], + "placementPolicy": { + "replicas": [ + { + "count": 4 + } + ], + "containerBackupFactor": 5 + } + }, + "signature": { + "key": "YW55X3B1YmxpY19rZXk=", + "signature": "YW55X3NpZ25hdHVyZQ==" + } + }, + "metaHeader": { + "version": { + "major": 201, + "minor": 202 + }, + "epoch": "304", + "ttl": 303, + "xHeaders": [ + { + "key": "xheader_key301", + "value": "xheader_val301" + }, + { + "key": "xheader_key302", + "value": "xheader_val302" + } + ], + "origin": { + "version": { + "major": 201, + "minor": 202 + }, + "epoch": "204", + "ttl": 203, + "xHeaders": [ + { + "key": "xheader_key201", + "value": "xheader_val201" + }, + { + "key": "xheader_key202", + "value": "xheader_val202" + } + ], + "origin": { + "version": { + "major": 101, + "minor": 102 + }, + "epoch": "104", + "ttl": 103, + "xHeaders": [ + { + "key": "xheader_key101", + "value": "xheader_val101" + }, + { + "key": "xheader_key102", + "value": "xheader_val102" + } + ], + "magicNumber": "105" + }, + "magicNumber": "205" + }, + "magicNumber": "305" + }, + "verifyHeader": { + "metaSignature": { + "key": "A92r/FENWZ/cs8ITGMstaA2Z+kTCftKhqO4xDmGT/B5f", + 
"signature": "qeh9FHCBju7S3e+g4c0BwLf20eAwHco8fy3mwH4m3LI2675eEx7O2lTzk26m2zZM5Z1GS7g2dPJcJqxJFZFlknnJVu60wVh1+8p81pd5GDA=", + "scheme": "ECDSA_RFC6979_SHA256_WALLET_CONNECT" + }, + "originSignature": { + "key": "A92r/FENWZ/cs8ITGMstaA2Z+kTCftKhqO4xDmGT/B5f", + "signature": "qdzcM7mScG7/9p2rkWXWdds6b0kbSatfKdh9w6zlkDvZf9HcIT6kwe4wrAL/yh28SQR6yEuUSMg+WrdCUgbsNk/9wJXvYlpHCrrwWO95zdo=", + "scheme": "ECDSA_RFC6979_SHA256_WALLET_CONNECT" + }, + "origin": { + "metaSignature": { + "key": "Alp/IfNvhpM8jRvH02mfk+7N561mlvSdQYzGb9QT0Wsc", + "signature": "laKIZNkiq5HoWWkE0srgK2DTNsE1uaSUuNKTyHI3BSnRFddeKdAMl5VgWHQnSh4fXSNTojnMnyClxsqwyZh6UQ==", + "scheme": "ECDSA_RFC6979_SHA256" + }, + "originSignature": { + "key": "Alp/IfNvhpM8jRvH02mfk+7N561mlvSdQYzGb9QT0Wsc", + "signature": "ad8P2fItUBgaKhuhK6kKyMVcGHTIiz7oH8w3z8MeAxCozNAVp5sPKAeOot5FRhTihnbvkdSNuYaIOeeUBRNBeg==", + "scheme": "ECDSA_RFC6979_SHA256" + }, + "origin": { + "bodySignature": { + "key": "AhswIU+23QEf59JGmfiB893oY1at7TZsJorygMeKlEHQ", + "signature": "BOwgPJnnFera4+ze9kMddOyI6T83EguUsK21dgLPGWYuZCEkNLSKAGeH3RO+/2pW92fdVIL7M0JLZ+PKlrsm/os=", + "scheme": "ECDSA_SHA512" + }, + "metaSignature": { + "key": "AhswIU+23QEf59JGmfiB893oY1at7TZsJorygMeKlEHQ", + "signature": "BHGbz1dqaqvN3XWqF8tcUPKZzfcTsD605Fl8+QRylmufZxfsoUk2uz5QDvDwJGS6utpdC26qGyoZ+L8FBJGStr4=", + "scheme": "ECDSA_SHA512" + }, + "originSignature": { + "key": "AhswIU+23QEf59JGmfiB893oY1at7TZsJorygMeKlEHQ", + "signature": "BCiOmADvhvacI0ICDPpeWb4EC4EES2/fKbt8NQVhUum61kh8+pdkPUP+JKEqcLrg07xo3fsq1zK5oAKMTjpPjTU=", + "scheme": "ECDSA_SHA512" + } + } + } + } +} diff --git a/crypto/testdata/container_put_response b/crypto/testdata/container_put_response new file mode 100644 index 000000000..6f39e40d5 --- /dev/null +++ b/crypto/testdata/container_put_response @@ -0,0 +1 @@ 
+0a110a0f0a0d616e795f636f6e7461696e65721285020a0608c90110ca0110b00218af0222200a0e786865616465725f6b6579333031120e786865616465725f76616c33303122200a0e786865616465725f6b6579333032120e786865616465725f76616c3330322aab010a0608c90110ca0110cc0118cb0122200a0e786865616465725f6b6579323031120e786865616465725f76616c32303122200a0e786865616465725f6b6579323032120e786865616465725f76616c3230322a520a04086510661068186722200a0e786865616465725f6b6579313031120e786865616465725f76616c31303122200a0e786865616465725f6b6579313032120e786865616465725f76616c3130323202086a320308ce01320308b2021a820612770a2103ff7443d4dd5fb61adac51c69c4d5893c1fce0de01e8405aff855d7c8111ef2c612501767dd91f6a0caf8aabefb15d798f886682cbd3172c41c95a6bb8642491a9f19ade074d46c97095051bef309f34a945d490db363b900e9dc1fef24660f34ec023a3881ec2be0133fd4b9f5aa5d16bae218021a770a2103ff7443d4dd5fb61adac51c69c4d5893c1fce0de01e8405aff855d7c8111ef2c61250852a9fb2862efa8b169f6ef442c071e41f37aead6932c20abb01a0a3e250513cbbfe7d50a770141ded35b01a62ce398c188a26afcb03ab54cac5dfd9675b3bc20d8732b9077749bd8bc32ab7b6f7452a1802228d0412670a2103c5ca01b42cb0a5f2b63d6c118a9c77ceae0f44649aafb5bfbca83d3f42849c671240cb1846293c4af56e006c31c1c35ce7031605a3c07a7638a7af94e92e328f38da20d0d2216ffae2944d3778cde5d4dccf2ba7ab5f162473e0d79790ea2ff01f8918011a670a2103c5ca01b42cb0a5f2b63d6c118a9c77ceae0f44649aafb5bfbca83d3f42849c6712407469c3fd54c983eb6fe05008a361605b1145728fdd15488c3b87b43d1e0362a3bbadf2b10f4dfaf2f11bf3c9339eaad7748616686971502aa4871b42dd4373fd180122b8020a660a21030051cba06f3b20eeb79c2c9e06eafa34e1645fae74c754b122fa4f19e2b981c1124104c51052dff592a0e5268ccb60693424b9df6a606c83f7afece94c8b4ba590541243807080d9b40ee56dc4884906fbfdbaaa1aa4af25992e620e2f84d4d75c09f812660a21030051cba06f3b20eeb79c2c9e06eafa34e1645fae74c754b122fa4f19e2b981c11241047238c8809d9037efd3166d3229758ada31b7ccc4d4e5a3396185725e83b0ed9cfa325d193115a412b4d307ec3d54daf13cd46b26aa8d695e2dde33dc0887d9ea1a660a21030051cba06f3b20eeb79c2c9e06eafa34e1645fae74c754b122fa4f19e2b981c1124104e6731f361f0fc8e
101c8cdee724bf49a7595ad1b8478e83940c10d1e6513f169d68d0fd210af2cf331ee1b3a64bcca761d2d99ae2b5b74c1f2d5f42e86564e5f \ No newline at end of file diff --git a/crypto/testdata/container_put_response.json b/crypto/testdata/container_put_response.json new file mode 100644 index 000000000..f5dba1394 --- /dev/null +++ b/crypto/testdata/container_put_response.json @@ -0,0 +1,108 @@ +{ + "body": { + "containerId": { + "value": "YW55X2NvbnRhaW5lcg==" + } + }, + "metaHeader": { + "version": { + "major": 201, + "minor": 202 + }, + "epoch": "304", + "ttl": 303, + "xHeaders": [ + { + "key": "xheader_key301", + "value": "xheader_val301" + }, + { + "key": "xheader_key302", + "value": "xheader_val302" + } + ], + "origin": { + "version": { + "major": 201, + "minor": 202 + }, + "epoch": "204", + "ttl": 203, + "xHeaders": [ + { + "key": "xheader_key201", + "value": "xheader_val201" + }, + { + "key": "xheader_key202", + "value": "xheader_val202" + } + ], + "origin": { + "version": { + "major": 101, + "minor": 102 + }, + "epoch": "104", + "ttl": 103, + "xHeaders": [ + { + "key": "xheader_key101", + "value": "xheader_val101" + }, + { + "key": "xheader_key102", + "value": "xheader_val102" + } + ], + "status": { + "code": 106 + } + }, + "status": { + "code": 206 + } + }, + "status": { + "code": 306 + } + }, + "verifyHeader": { + "metaSignature": { + "key": "A/90Q9TdX7Ya2sUcacTViTwfzg3gHoQFr/hV18gRHvLG", + "signature": "F2fdkfagyviqvvsV15j4hmgsvTFyxByVpruGQkkanxmt4HTUbJcJUFG+8wnzSpRdSQ2zY7kA6dwf7yRmDzTsAjo4gewr4BM/1Ln1ql0WuuI=", + "scheme": "ECDSA_RFC6979_SHA256_WALLET_CONNECT" + }, + "originSignature": { + "key": "A/90Q9TdX7Ya2sUcacTViTwfzg3gHoQFr/hV18gRHvLG", + "signature": "hSqfsoYu+osWn270QsBx5B83rq1pMsIKuwGgo+JQUTy7/n1Qp3AUHe01sBpizjmMGIomr8sDq1TKxd/ZZ1s7wg2HMrkHd0m9i8Mqt7b3RSo=", + "scheme": "ECDSA_RFC6979_SHA256_WALLET_CONNECT" + }, + "origin": { + "metaSignature": { + "key": "A8XKAbQssKXytj1sEYqcd86uD0Rkmq+1v7yoPT9ChJxn", + "signature": 
"yxhGKTxK9W4AbDHBw1znAxYFo8B6djinr5TpLjKPONog0NIhb/rilE03eM3l1NzPK6erXxYkc+DXl5DqL/AfiQ==", + "scheme": "ECDSA_RFC6979_SHA256" + }, + "originSignature": { + "key": "A8XKAbQssKXytj1sEYqcd86uD0Rkmq+1v7yoPT9ChJxn", + "signature": "dGnD/VTJg+tv4FAIo2FgWxFFco/dFUiMO4e0PR4DYqO7rfKxD0368vEb88kznqrXdIYWaGlxUCqkhxtC3UNz/Q==", + "scheme": "ECDSA_RFC6979_SHA256" + }, + "origin": { + "bodySignature": { + "key": "AwBRy6BvOyDut5wsngbq+jThZF+udMdUsSL6TxniuYHB", + "signature": "BMUQUt/1kqDlJozLYGk0JLnfamBsg/ev7OlMi0ulkFQSQ4BwgNm0DuVtxIhJBvv9uqoapK8lmS5iDi+E1NdcCfg=" + }, + "metaSignature": { + "key": "AwBRy6BvOyDut5wsngbq+jThZF+udMdUsSL6TxniuYHB", + "signature": "BHI4yICdkDfv0xZtMil1itoxt8zE1OWjOWGFcl6DsO2c+jJdGTEVpBK00wfsPVTa8TzUayaqjWleLd4z3AiH2eo=" + }, + "originSignature": { + "key": "AwBRy6BvOyDut5wsngbq+jThZF+udMdUsSL6TxniuYHB", + "signature": "BOZzHzYfD8jhAcjN7nJL9Jp1la0bhHjoOUDBDR5lE/Fp1o0P0hCvLPMx7hs6ZLzKdh0tma4rW3TB8tX0LoZWTl8=" + } + } + } + } +} diff --git a/eacl/enums.go b/eacl/enums.go index 6ced5a07b..25e0ecfbd 100644 --- a/eacl/enums.go +++ b/eacl/enums.go @@ -1,469 +1,128 @@ package eacl import ( - v2acl "github.com/nspcc-dev/neofs-api-go/v2/acl" + "strconv" ) -// Action taken if ContainerEACL record matched request. -// Action is compatible with v2 acl.Action enum. +// Action enumerates actions that may be applied within NeoFS access management. +// What and how specific Action affects depends on the specific context. type Action uint32 +// All supported [Action] values. const ( - // ActionUnknown is an Action value used to mark action as undefined. - ActionUnknown Action = iota - - // ActionAllow is an Action value that allows access to the operation from context. - ActionAllow - - // ActionDeny is an Action value that denies access to the operation from context. - ActionDeny -) - -// Operation is a object service method to match request. -// Operation is compatible with v2 acl.Operation enum. 
-type Operation uint32 - -const ( - // OperationUnknown is an Operation value used to mark operation as undefined. - OperationUnknown Operation = iota - - // OperationGet is an object get Operation. - OperationGet - - // OperationHead is an Operation of getting the object header. - OperationHead - - // OperationPut is an object put Operation. - OperationPut - - // OperationDelete is an object delete Operation. - OperationDelete - - // OperationSearch is an object search Operation. - OperationSearch - - // OperationRange is an object payload range retrieval Operation. - OperationRange - - // OperationRangeHash is an object payload range hashing Operation. - OperationRangeHash + _ Action = iota + ActionAllow // allows something + ActionDeny // denies something ) -// Role is a group of request senders to match request. -// Role is compatible with v2 acl.Role enum. +// Role enumerates groups of subjects requesting access to NeoFS resources. type Role uint32 +// All supported [Role] values. const ( - // RoleUnknown is a Role value used to mark role as undefined. - RoleUnknown Role = iota - - // RoleUser is a group of senders that contains only key of container owner. - RoleUser - - // RoleSystem is a group of senders that contains keys of container nodes and - // inner ring nodes. - RoleSystem - - // RoleOthers is a group of senders that contains none of above keys. - RoleOthers + _ Role = iota + RoleUser // owner of the container requesting its objects + RoleSystem // Deprecated: NeoFS storage and Inner Ring nodes + RoleOthers // any other party ) -// Match is binary operation on filer name and value to check if request is matched. -// Match is compatible with v2 acl.MatchType enum. +// Match enumerates operators to check attribute value compliance. What and how +// specific Match affects depends on the specific context. type Match uint32 +// All supported Match values. const ( - // MatchUnknown is a Match value used to mark matcher as undefined. 
- MatchUnknown Match = iota - - // MatchStringEqual is a Match of string equality. - MatchStringEqual - - // MatchStringNotEqual is a Match of string inequality. - MatchStringNotEqual - - // MatchNotPresent is an operator for attribute absence. - MatchNotPresent - - // MatchNumGT is a numeric "greater than" operator. - MatchNumGT - - // MatchNumGE is a numeric "greater or equal than" operator. - MatchNumGE - - // MatchNumLT is a numeric "less than" operator. - MatchNumLT - - // MatchNumLE is a numeric "less or equal than" operator. - MatchNumLE + _ Match = iota + MatchStringEqual // string equality + MatchStringNotEqual // string inequality + MatchNotPresent // attribute absence + MatchNumGT // numeric "greater than" operator + MatchNumGE // numeric "greater or equal than" operator + MatchNumLT // is a numeric "less than" operator + MatchNumLE // is a numeric "less or equal than" operator ) -// FilterHeaderType indicates source of headers to make matches. -// FilterHeaderType is compatible with v2 acl.HeaderType enum. -type FilterHeaderType uint32 +// AttributeType enumerates the classes of resource attributes processed within +// NeoFS access management. +type AttributeType uint32 const ( - // HeaderTypeUnknown is a FilterHeaderType value used to mark header type as undefined. - HeaderTypeUnknown FilterHeaderType = iota - - // HeaderFromRequest is a FilterHeaderType for request X-Header. - HeaderFromRequest - - // HeaderFromObject is a FilterHeaderType for object header. - HeaderFromObject - - // HeaderFromService is a FilterHeaderType for service header. - HeaderFromService + _ AttributeType = iota + AttributeAPIRequest // API request X-Header + AttributeObject // object attribute + AttributeCustomService // custom service attribute ) -// ToV2 converts Action to v2 Action enum value. 
-func (a Action) ToV2() v2acl.Action { - switch a { - case ActionAllow: - return v2acl.ActionAllow - case ActionDeny: - return v2acl.ActionDeny - default: - return v2acl.ActionUnknown - } -} - -// ActionFromV2 converts v2 Action enum value to Action. -func ActionFromV2(action v2acl.Action) (a Action) { - switch action { - case v2acl.ActionAllow: - a = ActionAllow - case v2acl.ActionDeny: - a = ActionDeny - default: - a = ActionUnknown - } - - return a -} - -// EncodeToString returns string representation of Action. -// -// String mapping: -// - ActionAllow: ALLOW; -// - ActionDeny: DENY; -// - ActionUnknown, default: ACTION_UNSPECIFIED. -func (a Action) EncodeToString() string { - return a.ToV2().String() -} - -// String implements fmt.Stringer. +// String implements [fmt.Stringer]. // // String is designed to be human-readable, and its format MAY differ between -// SDK versions. String MAY return same result as EncodeToString. String MUST NOT -// be used to encode ID into NeoFS protocol string. +// SDK versions. func (a Action) String() string { - return a.EncodeToString() -} - -// DecodeString parses Action from a string representation. -// It is a reverse action to EncodeToString(). -// -// Returns true if s was parsed successfully. -func (a *Action) DecodeString(s string) bool { - var g v2acl.Action - - ok := g.FromString(s) - - if ok { - *a = ActionFromV2(g) - } - - return ok -} - -// ToV2 converts Operation to v2 Operation enum value. 
-func (o Operation) ToV2() v2acl.Operation { - switch o { - case OperationGet: - return v2acl.OperationGet - case OperationHead: - return v2acl.OperationHead - case OperationPut: - return v2acl.OperationPut - case OperationDelete: - return v2acl.OperationDelete - case OperationSearch: - return v2acl.OperationSearch - case OperationRange: - return v2acl.OperationRange - case OperationRangeHash: - return v2acl.OperationRangeHash - default: - return v2acl.OperationUnknown - } -} - -// OperationFromV2 converts v2 Operation enum value to Operation. -func OperationFromV2(operation v2acl.Operation) (o Operation) { - switch operation { - case v2acl.OperationGet: - o = OperationGet - case v2acl.OperationHead: - o = OperationHead - case v2acl.OperationPut: - o = OperationPut - case v2acl.OperationDelete: - o = OperationDelete - case v2acl.OperationSearch: - o = OperationSearch - case v2acl.OperationRange: - o = OperationRange - case v2acl.OperationRangeHash: - o = OperationRangeHash + switch a { default: - o = OperationUnknown + return "UNKNOWN#" + strconv.FormatUint(uint64(a), 10) + case ActionAllow: + return "ALLOW" + case ActionDeny: + return "DENY" } - - return o -} - -// EncodeToString returns string representation of Operation. -// -// String mapping: -// - OperationGet: GET; -// - OperationHead: HEAD; -// - OperationPut: PUT; -// - OperationDelete: DELETE; -// - OperationSearch: SEARCH; -// - OperationRange: GETRANGE; -// - OperationRangeHash: GETRANGEHASH; -// - OperationUnknown, default: OPERATION_UNSPECIFIED. -func (o Operation) EncodeToString() string { - return o.ToV2().String() } -// String implements fmt.Stringer. +// String implements [fmt.Stringer]. // // String is designed to be human-readable, and its format MAY differ between -// SDK versions. String MAY return same result as EncodeToString. String MUST NOT -// be used to encode ID into NeoFS protocol string. 
-func (o Operation) String() string { - return o.EncodeToString() -} - -// DecodeString parses Operation from a string representation. -// It is a reverse action to EncodeToString(). -// -// Returns true if s was parsed successfully. -func (o *Operation) DecodeString(s string) bool { - var g v2acl.Operation - - ok := g.FromString(s) - - if ok { - *o = OperationFromV2(g) - } - - return ok -} - -// ToV2 converts Role to v2 Role enum value. -func (r Role) ToV2() v2acl.Role { +// SDK versions. +func (r Role) String() string { switch r { + default: + return "UNKNOWN#" + strconv.FormatUint(uint64(r), 10) case RoleUser: - return v2acl.RoleUser + return "USER" case RoleSystem: - return v2acl.RoleSystem + return "SYSTEM" case RoleOthers: - return v2acl.RoleOthers - default: - return v2acl.RoleUnknown - } -} - -// RoleFromV2 converts v2 Role enum value to Role. -func RoleFromV2(role v2acl.Role) (r Role) { - switch role { - case v2acl.RoleUser: - r = RoleUser - case v2acl.RoleSystem: - r = RoleSystem - case v2acl.RoleOthers: - r = RoleOthers - default: - r = RoleUnknown + return "OTHERS" } - - return r -} - -// EncodeToString returns string representation of Role. -// -// String mapping: -// - RoleUser: USER; -// - RoleSystem: SYSTEM; -// - RoleOthers: OTHERS; -// - RoleUnknown, default: ROLE_UNKNOWN. -func (r Role) EncodeToString() string { - return r.ToV2().String() } -// String implements fmt.Stringer. +// String implements [fmt.Stringer]. // // String is designed to be human-readable, and its format MAY differ between -// SDK versions. String MAY return same result as EncodeToString. String MUST NOT -// be used to encode ID into NeoFS protocol string. -func (r Role) String() string { - return r.EncodeToString() -} - -// DecodeString parses Role from a string representation. -// It is a reverse action to EncodeToString(). -// -// Returns true if s was parsed successfully. 
-func (r *Role) DecodeString(s string) bool { - var g v2acl.Role - - ok := g.FromString(s) - - if ok { - *r = RoleFromV2(g) - } - - return ok -} - -// ToV2 converts Match to v2 MatchType enum value. -func (m Match) ToV2() v2acl.MatchType { +// SDK versions. +func (m Match) String() string { switch m { - case - MatchStringEqual, - MatchStringNotEqual, - MatchNotPresent, - MatchNumGT, - MatchNumGE, - MatchNumLT, - MatchNumLE: - return v2acl.MatchType(m) - default: - return v2acl.MatchTypeUnknown - } -} - -// MatchFromV2 converts v2 MatchType enum value to Match. -func MatchFromV2(match v2acl.MatchType) Match { - switch match { - case - v2acl.MatchTypeStringEqual, - v2acl.MatchTypeStringNotEqual, - v2acl.MatchTypeNotPresent, - v2acl.MatchTypeNumGT, - v2acl.MatchTypeNumGE, - v2acl.MatchTypeNumLT, - v2acl.MatchTypeNumLE: - return Match(match) default: - return MatchUnknown + return "UNKNOWN#" + strconv.FormatUint(uint64(m), 10) + case MatchStringEqual: + return "STRING_EQUAL" + case MatchStringNotEqual: + return "STRING_NOT_EQUAL" + case MatchNotPresent: + return "NOT_PRESENT" + case MatchNumGT: + return "NUMERIC_GT" + case MatchNumGE: + return "NUMERIC_GE" + case MatchNumLT: + return "NUMERIC_LT" + case MatchNumLE: + return "NUMERIC_LE" } } -// EncodeToString returns string representation of Match. -// -// String mapping: -// - MatchStringEqual: STRING_EQUAL; -// - MatchStringNotEqual: STRING_NOT_EQUAL; -// - MatchNotPresent: NOT_PRESENT; -// - MatchNumGT: NUM_GT; -// - MatchNumGE: NUM_GE; -// - MatchNumLT: NUM_LT; -// - MatchNumLE: NUM_LE; -// - MatchUnknown, default: MATCH_TYPE_UNSPECIFIED. -func (m Match) EncodeToString() string { - return m.ToV2().String() -} - -// String implements fmt.Stringer. +// String implements [fmt.Stringer]. // // String is designed to be human-readable, and its format MAY differ between -// SDK versions. String MAY return same result as EncodeToString. String MUST NOT -// be used to encode ID into NeoFS protocol string. 
-func (m Match) String() string { - return m.EncodeToString() -} - -// DecodeString parses Match from a string representation. -// It is a reverse action to EncodeToString(). -// -// Returns true if s was parsed successfully. -func (m *Match) DecodeString(s string) bool { - var g v2acl.MatchType - - ok := g.FromString(s) - - if ok { - *m = MatchFromV2(g) - } - - return ok -} - -// ToV2 converts FilterHeaderType to v2 HeaderType enum value. -func (h FilterHeaderType) ToV2() v2acl.HeaderType { - switch h { - case HeaderFromRequest: - return v2acl.HeaderTypeRequest - case HeaderFromObject: - return v2acl.HeaderTypeObject - case HeaderFromService: - return v2acl.HeaderTypeService - default: - return v2acl.HeaderTypeUnknown - } -} - -// FilterHeaderTypeFromV2 converts v2 HeaderType enum value to FilterHeaderType. -func FilterHeaderTypeFromV2(header v2acl.HeaderType) (h FilterHeaderType) { - switch header { - case v2acl.HeaderTypeRequest: - h = HeaderFromRequest - case v2acl.HeaderTypeObject: - h = HeaderFromObject - case v2acl.HeaderTypeService: - h = HeaderFromService +// SDK versions. +func (x AttributeType) String() string { + switch x { default: - h = HeaderTypeUnknown - } - - return h -} - -// EncodeToString returns string representation of FilterHeaderType. -// -// String mapping: -// - HeaderFromRequest: REQUEST; -// - HeaderFromObject: OBJECT; -// - HeaderTypeUnknown, default: HEADER_UNSPECIFIED. -func (h FilterHeaderType) EncodeToString() string { - return h.ToV2().String() -} - -// String implements fmt.Stringer. -// -// String is designed to be human-readable, and its format MAY differ between -// SDK versions. String MAY return same result as EncodeToString. String MUST NOT -// be used to encode ID into NeoFS protocol string. -func (h FilterHeaderType) String() string { - return h.EncodeToString() -} - -// DecodeString parses FilterHeaderType from a string representation. -// It is a reverse action to EncodeToString(). 
-// -// Returns true if s was parsed successfully. -func (h *FilterHeaderType) DecodeString(s string) bool { - var g v2acl.HeaderType - - ok := g.FromString(s) - - if ok { - *h = FilterHeaderTypeFromV2(g) + return "UNKNOWN#" + strconv.FormatUint(uint64(x), 10) + case AttributeAPIRequest: + return "API_REQUEST" + case AttributeObject: + return "OBJECT" + case AttributeCustomService: + return "CUSTOM_SERVICE" } - - return ok } diff --git a/eacl/enums_test.go b/eacl/enums_test.go deleted file mode 100644 index d5d950860..000000000 --- a/eacl/enums_test.go +++ /dev/null @@ -1,224 +0,0 @@ -package eacl_test - -import ( - "testing" - - v2acl "github.com/nspcc-dev/neofs-api-go/v2/acl" - "github.com/nspcc-dev/neofs-sdk-go/eacl" - "github.com/stretchr/testify/require" -) - -var ( - eqV2Actions = map[eacl.Action]v2acl.Action{ - eacl.ActionUnknown: v2acl.ActionUnknown, - eacl.ActionAllow: v2acl.ActionAllow, - eacl.ActionDeny: v2acl.ActionDeny, - } - - eqV2Operations = map[eacl.Operation]v2acl.Operation{ - eacl.OperationUnknown: v2acl.OperationUnknown, - eacl.OperationGet: v2acl.OperationGet, - eacl.OperationHead: v2acl.OperationHead, - eacl.OperationPut: v2acl.OperationPut, - eacl.OperationDelete: v2acl.OperationDelete, - eacl.OperationSearch: v2acl.OperationSearch, - eacl.OperationRange: v2acl.OperationRange, - eacl.OperationRangeHash: v2acl.OperationRangeHash, - } - - eqV2Roles = map[eacl.Role]v2acl.Role{ - eacl.RoleUnknown: v2acl.RoleUnknown, - eacl.RoleUser: v2acl.RoleUser, - eacl.RoleSystem: v2acl.RoleSystem, - eacl.RoleOthers: v2acl.RoleOthers, - } - - eqV2Matches = map[eacl.Match]v2acl.MatchType{ - eacl.MatchUnknown: v2acl.MatchTypeUnknown, - eacl.MatchStringEqual: v2acl.MatchTypeStringEqual, - eacl.MatchStringNotEqual: v2acl.MatchTypeStringNotEqual, - eacl.MatchNotPresent: v2acl.MatchTypeNotPresent, - eacl.MatchNumGT: v2acl.MatchTypeNumGT, - eacl.MatchNumGE: v2acl.MatchTypeNumGE, - eacl.MatchNumLT: v2acl.MatchTypeNumLT, - eacl.MatchNumLE: v2acl.MatchTypeNumLE, - } - - 
eqV2HeaderTypes = map[eacl.FilterHeaderType]v2acl.HeaderType{ - eacl.HeaderTypeUnknown: v2acl.HeaderTypeUnknown, - eacl.HeaderFromRequest: v2acl.HeaderTypeRequest, - eacl.HeaderFromObject: v2acl.HeaderTypeObject, - eacl.HeaderFromService: v2acl.HeaderTypeService, - } -) - -func TestAction(t *testing.T) { - t.Run("known actions", func(t *testing.T) { - for i := eacl.ActionUnknown; i <= eacl.ActionDeny; i++ { - require.Equal(t, eqV2Actions[i], i.ToV2()) - require.Equal(t, eacl.ActionFromV2(i.ToV2()), i) - } - }) - - t.Run("unknown actions", func(t *testing.T) { - require.Equal(t, (eacl.ActionDeny + 1).ToV2(), v2acl.ActionUnknown) - require.Equal(t, eacl.ActionFromV2(v2acl.ActionDeny+1), eacl.ActionUnknown) - }) -} - -func TestOperation(t *testing.T) { - t.Run("known operations", func(t *testing.T) { - for i := eacl.OperationUnknown; i <= eacl.OperationRangeHash; i++ { - require.Equal(t, eqV2Operations[i], i.ToV2()) - require.Equal(t, eacl.OperationFromV2(i.ToV2()), i) - } - }) - - t.Run("unknown operations", func(t *testing.T) { - require.Equal(t, (eacl.OperationRangeHash + 1).ToV2(), v2acl.OperationUnknown) - require.Equal(t, eacl.OperationFromV2(v2acl.OperationRangeHash+1), eacl.OperationUnknown) - }) -} - -func TestRole(t *testing.T) { - t.Run("known roles", func(t *testing.T) { - for i := eacl.RoleUnknown; i <= eacl.RoleOthers; i++ { - require.Equal(t, eqV2Roles[i], i.ToV2()) - require.Equal(t, eacl.RoleFromV2(i.ToV2()), i) - } - }) - - t.Run("unknown roles", func(t *testing.T) { - require.Equal(t, (eacl.RoleOthers + 1).ToV2(), v2acl.RoleUnknown) - require.Equal(t, eacl.RoleFromV2(v2acl.RoleOthers+1), eacl.RoleUnknown) - }) -} - -func TestMatch(t *testing.T) { - t.Run("known matches", func(t *testing.T) { - for i := eacl.MatchUnknown; i <= eacl.MatchStringNotEqual; i++ { - require.Equal(t, eqV2Matches[i], i.ToV2()) - require.Equal(t, eacl.MatchFromV2(i.ToV2()), i) - } - }) - - t.Run("unknown matches", func(t *testing.T) { - require.Equal(t, (eacl.MatchNumLE + 
1).ToV2(), v2acl.MatchTypeUnknown) - require.Equal(t, eacl.MatchFromV2(v2acl.MatchTypeNumLE+1), eacl.MatchUnknown) - }) -} - -func TestFilterHeaderType(t *testing.T) { - t.Run("known header types", func(t *testing.T) { - for i := eacl.HeaderTypeUnknown; i <= eacl.HeaderFromService; i++ { - require.Equal(t, eqV2HeaderTypes[i], i.ToV2()) - require.Equal(t, eacl.FilterHeaderTypeFromV2(i.ToV2()), i) - } - }) - - t.Run("unknown header types", func(t *testing.T) { - require.Equal(t, (eacl.HeaderFromService + 1).ToV2(), v2acl.HeaderTypeUnknown) - require.Equal(t, eacl.FilterHeaderTypeFromV2(v2acl.HeaderTypeService+1), eacl.HeaderTypeUnknown) - }) -} - -type enumIface interface { - DecodeString(string) bool - EncodeToString() string -} - -type enumStringItem struct { - val enumIface - str string -} - -func testEnumStrings(t *testing.T, e enumIface, items []enumStringItem) { - for _, item := range items { - require.Equal(t, item.str, item.val.EncodeToString()) - - s := item.val.EncodeToString() - - require.True(t, e.DecodeString(s), s) - - require.EqualValues(t, item.val, e, item.val) - } - - // incorrect strings - for _, str := range []string{ - "some string", - "UNSPECIFIED", - } { - require.False(t, e.DecodeString(str)) - } -} - -func TestAction_String(t *testing.T) { - toPtr := func(v eacl.Action) *eacl.Action { - return &v - } - - testEnumStrings(t, new(eacl.Action), []enumStringItem{ - {val: toPtr(eacl.ActionAllow), str: "ALLOW"}, - {val: toPtr(eacl.ActionDeny), str: "DENY"}, - {val: toPtr(eacl.ActionUnknown), str: "ACTION_UNSPECIFIED"}, - }) -} - -func TestRole_String(t *testing.T) { - toPtr := func(v eacl.Role) *eacl.Role { - return &v - } - - testEnumStrings(t, new(eacl.Role), []enumStringItem{ - {val: toPtr(eacl.RoleUser), str: "USER"}, - {val: toPtr(eacl.RoleSystem), str: "SYSTEM"}, - {val: toPtr(eacl.RoleOthers), str: "OTHERS"}, - {val: toPtr(eacl.RoleUnknown), str: "ROLE_UNSPECIFIED"}, - }) -} - -func TestOperation_String(t *testing.T) { - toPtr := func(v 
eacl.Operation) *eacl.Operation { - return &v - } - - testEnumStrings(t, new(eacl.Operation), []enumStringItem{ - {val: toPtr(eacl.OperationGet), str: "GET"}, - {val: toPtr(eacl.OperationPut), str: "PUT"}, - {val: toPtr(eacl.OperationHead), str: "HEAD"}, - {val: toPtr(eacl.OperationDelete), str: "DELETE"}, - {val: toPtr(eacl.OperationSearch), str: "SEARCH"}, - {val: toPtr(eacl.OperationRange), str: "GETRANGE"}, - {val: toPtr(eacl.OperationRangeHash), str: "GETRANGEHASH"}, - {val: toPtr(eacl.OperationUnknown), str: "OPERATION_UNSPECIFIED"}, - }) -} - -func TestMatch_String(t *testing.T) { - toPtr := func(v eacl.Match) *eacl.Match { - return &v - } - - testEnumStrings(t, new(eacl.Match), []enumStringItem{ - {val: toPtr(eacl.MatchStringEqual), str: "STRING_EQUAL"}, - {val: toPtr(eacl.MatchStringNotEqual), str: "STRING_NOT_EQUAL"}, - {val: toPtr(eacl.MatchUnknown), str: "MATCH_TYPE_UNSPECIFIED"}, - {val: toPtr(eacl.MatchNotPresent), str: "NOT_PRESENT"}, - {val: toPtr(eacl.MatchNumGT), str: "NUM_GT"}, - {val: toPtr(eacl.MatchNumGE), str: "NUM_GE"}, - {val: toPtr(eacl.MatchNumLT), str: "NUM_LT"}, - {val: toPtr(eacl.MatchNumLE), str: "NUM_LE"}, - }) -} - -func TestFilterHeaderType_String(t *testing.T) { - toPtr := func(v eacl.FilterHeaderType) *eacl.FilterHeaderType { - return &v - } - - testEnumStrings(t, new(eacl.FilterHeaderType), []enumStringItem{ - {val: toPtr(eacl.HeaderFromRequest), str: "REQUEST"}, - {val: toPtr(eacl.HeaderFromObject), str: "OBJECT"}, - {val: toPtr(eacl.HeaderTypeUnknown), str: "HEADER_UNSPECIFIED"}, - }) -} diff --git a/eacl/filter.go b/eacl/filter.go index c613a317c..f859845a7 100644 --- a/eacl/filter.go +++ b/eacl/filter.go @@ -1,157 +1,120 @@ package eacl import ( - "strconv" + "errors" - v2acl "github.com/nspcc-dev/neofs-api-go/v2/acl" + apiacl "github.com/nspcc-dev/neofs-sdk-go/api/acl" ) -// Filter defines check conditions if request header is matched or not. 
Matched -// header means that request should be processed according to ContainerEACL action. -// -// Filter is compatible with v2 acl.EACLRecord.Filter message. +// Filter describes a binary property of an access-controlled NeoFS resource +// according to meta information about it. The meta information is represented +// by a set of key-value attributes of various types. type Filter struct { - from FilterHeaderType - matcher Match - key string - value stringEncoder + attrType AttributeType + matcher Match + key string + value string } -type staticStringer string - -type u64Stringer uint64 - // Various keys to object filters. const ( - FilterObjectVersion = v2acl.FilterObjectVersion - FilterObjectID = v2acl.FilterObjectID - FilterObjectContainerID = v2acl.FilterObjectContainerID - FilterObjectOwnerID = v2acl.FilterObjectOwnerID - FilterObjectCreationEpoch = v2acl.FilterObjectCreationEpoch - FilterObjectPayloadSize = v2acl.FilterObjectPayloadLength - FilterObjectPayloadChecksum = v2acl.FilterObjectPayloadHash - FilterObjectType = v2acl.FilterObjectType - FilterObjectPayloadHomomorphicChecksum = v2acl.FilterObjectHomomorphicHash + objectFilterPrefix = "$Object:" + FilterObjectVersion = objectFilterPrefix + "version" + FilterObjectID = objectFilterPrefix + "objectID" + FilterObjectContainerID = objectFilterPrefix + "containerID" + FilterObjectOwnerID = objectFilterPrefix + "ownerID" + FilterObjectCreationEpoch = objectFilterPrefix + "creationEpoch" + FilterObjectPayloadSize = objectFilterPrefix + "payloadLength" + FilterObjectPayloadChecksum = objectFilterPrefix + "payloadHash" + FilterObjectType = objectFilterPrefix + "objectType" + FilterObjectPayloadHomomorphicChecksum = objectFilterPrefix + "homomorphicHash" ) -func (s staticStringer) EncodeToString() string { - return string(s) -} - -func (u u64Stringer) EncodeToString() string { - return strconv.FormatUint(uint64(u), 10) -} - // CopyTo writes deep copy of the [Filter] to dst. 
func (f Filter) CopyTo(dst *Filter) { - dst.from = f.from - dst.matcher = f.matcher - dst.key = f.key - dst.value = f.value -} - -// Value returns filtered string value. -func (f Filter) Value() string { - return f.value.EncodeToString() + *dst = f } -// Matcher returns filter Match type. -func (f Filter) Matcher() Match { - return f.matcher +func isEmptyFilter(f Filter) bool { + return f.attrType == 0 && f.matcher == 0 && f.key == "" && f.value == "" } -// Key returns key to the filtered header. -func (f Filter) Key() string { - return f.key -} - -// From returns FilterHeaderType that defined which header will be filtered. -func (f Filter) From() FilterHeaderType { - return f.from +func filterToAPI(f Filter) *apiacl.EACLRecord_Filter { + if isEmptyFilter(f) { + return nil + } + return &apiacl.EACLRecord_Filter{ + HeaderType: apiacl.HeaderType(f.attrType), + MatchType: apiacl.MatchType(f.matcher), + Key: f.key, + Value: f.value, + } } -// ToV2 converts Filter to v2 acl.EACLRecord.Filter message. -// -// Nil Filter converts to nil. -func (f *Filter) ToV2() *v2acl.HeaderFilter { - if f == nil { - return nil +func (f *Filter) readFromV2(m *apiacl.EACLRecord_Filter, checkFieldPresence bool) error { + if checkFieldPresence && m.Key == "" { + return errors.New("missing key") } - filter := new(v2acl.HeaderFilter) - filter.SetValue(f.value.EncodeToString()) - filter.SetKey(f.key) - filter.SetMatchType(f.matcher.ToV2()) - filter.SetHeaderType(f.from.ToV2()) + f.attrType = AttributeType(m.HeaderType) + f.matcher = Match(m.MatchType) + f.key = m.Key + f.value = m.Value - return filter + return nil } -// NewFilter creates, initializes and returns blank Filter instance. +// SetValue sets value of the access-controlled resource's attribute to match. // -// Defaults: -// - header type: HeaderTypeUnknown; -// - matcher: MatchUnknown; -// - key: ""; -// - value: "". -func NewFilter() *Filter { - return NewFilterFromV2(new(v2acl.HeaderFilter)) +// See also [Filter.Value]. 
+func (f *Filter) SetValue(value string) { + f.value = value } -// NewFilterFromV2 converts v2 acl.EACLRecord.Filter message to Filter. -func NewFilterFromV2(filter *v2acl.HeaderFilter) *Filter { - f := new(Filter) - - if filter == nil { - return f - } - - f.from = FilterHeaderTypeFromV2(filter.GetHeaderType()) - f.matcher = MatchFromV2(filter.GetMatchType()) - f.key = filter.GetKey() - f.value = staticStringer(filter.GetValue()) - - return f +// Value returns value of the access-controlled resource's attribute to match. +// +// See also [Filter.SetValue]. +func (f Filter) Value() string { + return f.value } -// Marshal marshals Filter into a protobuf binary form. -func (f *Filter) Marshal() ([]byte, error) { - return f.ToV2().StableMarshal(nil), nil +// Matcher returns operator to match the attribute. +func (f Filter) Matcher() Match { + return f.matcher } -// Unmarshal unmarshals protobuf binary representation of Filter. -func (f *Filter) Unmarshal(data []byte) error { - fV2 := new(v2acl.HeaderFilter) - if err := fV2.Unmarshal(data); err != nil { - return err - } - - *f = *NewFilterFromV2(fV2) - - return nil +// SetMatcher sets operator to match the attribute. +// +// See also [Filter.Matcher]. +func (f *Filter) SetMatcher(m Match) { + f.matcher = m } -// MarshalJSON encodes Filter to protobuf JSON format. -func (f *Filter) MarshalJSON() ([]byte, error) { - return f.ToV2().MarshalJSON() +// Key returns key to the access-controlled resource's attribute to match. +// +// See also [Filter.SetKey]. +func (f Filter) Key() string { + return f.key } -// UnmarshalJSON decodes Filter from protobuf JSON format. -func (f *Filter) UnmarshalJSON(data []byte) error { - fV2 := new(v2acl.HeaderFilter) - if err := fV2.UnmarshalJSON(data); err != nil { - return err - } - - *f = *NewFilterFromV2(fV2) +// SetKey sets key to the access-controlled resource's attribute to match. +// +// See also [Filter.Key]. 
+func (f *Filter) SetKey(key string) { + f.key = key +} - return nil +// AttributeType returns type of access-controlled resource's attribute to +// match. +// +// See also [Filter.AttributeType]. +func (f Filter) AttributeType() AttributeType { + return f.attrType } -// equalFilters compares Filter with each other. -func equalFilters(f1, f2 Filter) bool { - return f1.From() == f2.From() && - f1.Matcher() == f2.Matcher() && - f1.Key() == f2.Key() && - f1.Value() == f2.Value() +// SetAttributeType sets type of access-controlled resource's attribute to match. +// +// See also [Filter.AttributeType]. +func (f *Filter) SetAttributeType(v AttributeType) { + f.attrType = v } diff --git a/eacl/filter_test.go b/eacl/filter_test.go index 274459fb7..043c99c8c 100644 --- a/eacl/filter_test.go +++ b/eacl/filter_test.go @@ -1,126 +1,78 @@ -package eacl +package eacl_test import ( - "bytes" "testing" - "github.com/nspcc-dev/neofs-api-go/v2/acl" - v2acl "github.com/nspcc-dev/neofs-api-go/v2/acl" + "github.com/nspcc-dev/neofs-sdk-go/eacl" + eacltest "github.com/nspcc-dev/neofs-sdk-go/eacl/test" "github.com/stretchr/testify/require" ) -func newObjectFilter(match Match, key, val string) *Filter { - return &Filter{ - from: HeaderFromObject, - key: key, - matcher: match, - value: staticStringer(val), - } -} - -func TestFilter(t *testing.T) { - filter := newObjectFilter(MatchStringEqual, "some name", "200") - - v2 := filter.ToV2() - require.NotNil(t, v2) - require.Equal(t, v2acl.HeaderTypeObject, v2.GetHeaderType()) - require.EqualValues(t, v2acl.MatchTypeStringEqual, v2.GetMatchType()) - require.Equal(t, filter.Key(), v2.GetKey()) - require.Equal(t, filter.Value(), v2.GetValue()) - - newFilter := NewFilterFromV2(v2) - require.Equal(t, filter, newFilter) +func TestFilter_AttributeType(t *testing.T) { + var f eacl.Filter + require.Zero(t, f.AttributeType()) - t.Run("from nil v2 filter", func(t *testing.T) { - require.Equal(t, new(Filter), NewFilterFromV2(nil)) - }) + 
f.SetAttributeType(13) + require.EqualValues(t, 13, f.AttributeType()) + f.SetAttributeType(42) + require.EqualValues(t, 42, f.AttributeType()) } -func TestFilterEncoding(t *testing.T) { - f := newObjectFilter(MatchStringEqual, "key", "value") - - t.Run("binary", func(t *testing.T) { - data, err := f.Marshal() - require.NoError(t, err) - - f2 := NewFilter() - require.NoError(t, f2.Unmarshal(data)) - - require.Equal(t, f, f2) - }) - - t.Run("json", func(t *testing.T) { - data, err := f.MarshalJSON() - require.NoError(t, err) - - d2 := NewFilter() - require.NoError(t, d2.UnmarshalJSON(data)) +func TestFilter_Matcher(t *testing.T) { + var f eacl.Filter + require.Zero(t, f.Matcher()) - require.Equal(t, f, d2) - }) + f.SetMatcher(13) + require.EqualValues(t, 13, f.Matcher()) + f.SetMatcher(42) + require.EqualValues(t, 42, f.Matcher()) } -func TestFilter_ToV2(t *testing.T) { - t.Run("nil", func(t *testing.T) { - var x *Filter - - require.Nil(t, x.ToV2()) - }) - - t.Run("default values", func(t *testing.T) { - filter := NewFilter() +func TestFilter_Key(t *testing.T) { + var f eacl.Filter + require.Zero(t, f.Key()) + + for _, tc := range []struct { + set, exp string + }{ + {"any_key", "any_key"}, + {eacl.FilterObjectVersion, "$Object:version"}, + {eacl.FilterObjectID, "$Object:objectID"}, + {eacl.FilterObjectContainerID, "$Object:containerID"}, + {eacl.FilterObjectContainerID, "$Object:containerID"}, + {eacl.FilterObjectOwnerID, "$Object:ownerID"}, + {eacl.FilterObjectCreationEpoch, "$Object:creationEpoch"}, + {eacl.FilterObjectPayloadSize, "$Object:payloadLength"}, + {eacl.FilterObjectPayloadChecksum, "$Object:payloadHash"}, + {eacl.FilterObjectType, "$Object:objectType"}, + {eacl.FilterObjectPayloadHomomorphicChecksum, "$Object:homomorphicHash"}, + } { + f.SetKey(tc.set) + require.EqualValues(t, tc.exp, f.Key(), tc) + } +} - // check initial values - require.Empty(t, filter.Key()) - require.Empty(t, filter.Value()) - require.Equal(t, HeaderTypeUnknown, filter.From()) - 
require.Equal(t, MatchUnknown, filter.Matcher()) +func TestFilter_Value(t *testing.T) { + var f eacl.Filter + require.Zero(t, f.Value()) - // convert to v2 message - filterV2 := filter.ToV2() + f.SetValue("any_value") + require.EqualValues(t, "any_value", f.Value()) - require.Empty(t, filterV2.GetKey()) - require.Empty(t, filterV2.GetValue()) - require.Equal(t, acl.HeaderTypeUnknown, filterV2.GetHeaderType()) - require.Equal(t, acl.MatchTypeUnknown, filterV2.GetMatchType()) - }) + f.SetValue("other_value") + require.EqualValues(t, "other_value", f.Value()) } func TestFilter_CopyTo(t *testing.T) { - var filter Filter - filter.value = staticStringer("value") - filter.from = 1 - filter.matcher = 1 - filter.key = "1" - - var dst Filter - t.Run("copy", func(t *testing.T) { - filter.CopyTo(&dst) - - bts, err := filter.Marshal() - require.NoError(t, err) - - bts2, err := dst.Marshal() - require.NoError(t, err) - - require.Equal(t, filter, dst) - require.True(t, bytes.Equal(bts, bts2)) - }) - - t.Run("change", func(t *testing.T) { - require.Equal(t, filter.value, dst.value) - require.Equal(t, filter.from, dst.from) - require.Equal(t, filter.matcher, dst.matcher) - require.Equal(t, filter.key, dst.key) + src := eacltest.Filter() - dst.value = staticStringer("value2") - dst.from = 2 - dst.matcher = 2 - dst.key = "2" + var dst eacl.Filter + src.CopyTo(&dst) + require.Equal(t, src, dst) - require.NotEqual(t, filter.value, dst.value) - require.NotEqual(t, filter.from, dst.from) - require.NotEqual(t, filter.matcher, dst.matcher) - require.NotEqual(t, filter.key, dst.key) - }) + originKey := src.Key() + otherKey := originKey + "_extra" + src.SetKey(otherKey) + require.EqualValues(t, otherKey, src.Key()) + require.EqualValues(t, originKey, dst.Key()) } diff --git a/eacl/record.go b/eacl/record.go index 00d38d737..f6bd4ace5 100644 --- a/eacl/record.go +++ b/eacl/record.go @@ -1,342 +1,177 @@ package eacl import ( - "crypto/ecdsa" + "errors" + "fmt" - v2acl 
"github.com/nspcc-dev/neofs-api-go/v2/acl" - "github.com/nspcc-dev/neofs-sdk-go/checksum" - cid "github.com/nspcc-dev/neofs-sdk-go/container/id" - "github.com/nspcc-dev/neofs-sdk-go/object" - oid "github.com/nspcc-dev/neofs-sdk-go/object/id" - "github.com/nspcc-dev/neofs-sdk-go/user" - "github.com/nspcc-dev/neofs-sdk-go/version" + apiacl "github.com/nspcc-dev/neofs-sdk-go/api/acl" + "github.com/nspcc-dev/neofs-sdk-go/container/acl" ) -// Record of the ContainerEACL rule, that defines ContainerEACL action, targets for this action, -// object service operation and filters for request headers. -// -// Record is compatible with v2 acl.EACLRecord message. +// Record represents an access rule operating in NeoFS access management. The +// rule is applied when some party requests access to a certain NeoFS resource. +// Record is a structural descriptor: see [Table] for detailed behavior. type Record struct { action Action - operation Operation + operation acl.Op filters []Filter targets []Target } -// CopyTo writes deep copy of the [Record] to dst. -func (r Record) CopyTo(dst *Record) { - dst.action = r.action - dst.operation = r.operation - - dst.filters = make([]Filter, len(r.filters)) - copy(dst.filters, r.filters) - - dst.targets = make([]Target, len(r.targets)) - for i, t := range r.targets { - var newTarget Target - t.CopyTo(&newTarget) - - dst.targets[i] = newTarget - } -} - -// Targets returns list of target subjects to apply ACL rule to. -// -// The value returned shares memory with the structure itself, so changing it can lead to data corruption. -// Make a copy if you need to change it. -func (r Record) Targets() []Target { - return r.targets -} - -// SetTargets sets list of target subjects to apply ACL rule to. -func (r *Record) SetTargets(targets ...Target) { - r.targets = targets -} - -// Filters returns list of filters to match and see if rule is applicable. 
-// -// The value returned shares memory with the structure itself, so changing it can lead to data corruption. -// Make a copy if you need to change it. -func (r Record) Filters() []Filter { - return r.filters -} - -// Operation returns NeoFS request verb to match. -func (r Record) Operation() Operation { - return r.operation +func isEmptyRecord(r Record) bool { + return r.action == 0 && r.operation == 0 && len(r.filters) == 0 && len(r.targets) == 0 } -// SetOperation sets NeoFS request verb to match. -func (r *Record) SetOperation(operation Operation) { - r.operation = operation -} - -// Action returns rule execution result. -func (r Record) Action() Action { - return r.action -} +func recordToAPI(r Record) *apiacl.EACLRecord { + if isEmptyRecord(r) { + return nil + } -// SetAction sets rule execution result. -func (r *Record) SetAction(action Action) { - r.action = action -} + m := &apiacl.EACLRecord{ + Operation: apiacl.Operation(r.operation), + Action: apiacl.Action(r.action), + } -// AddRecordTarget adds single Target to the Record. -func AddRecordTarget(r *Record, t *Target) { - r.SetTargets(append(r.Targets(), *t)...) -} + if r.filters != nil { + m.Filters = make([]*apiacl.EACLRecord_Filter, len(r.filters)) + for i := range r.filters { + m.Filters[i] = filterToAPI(r.filters[i]) + } + } else { + m.Filters = nil + } -// AddFormedTarget forms Target with specified Role and list of -// ECDSA public keys and adds it to the Record. -func AddFormedTarget(r *Record, role Role, keys ...ecdsa.PublicKey) { - t := NewTarget() - t.SetRole(role) + if r.targets != nil { + m.Targets = make([]*apiacl.EACLRecord_Target, len(r.targets)) + for i := range r.targets { + m.Targets[i] = targetToAPI(r.targets[i]) + } + } else { + m.Targets = nil + } - SetTargetECDSAKeys(t, ecdsaKeysToPtrs(keys)...) 
- AddRecordTarget(r, t) + return m } -type stringEncoder interface { - EncodeToString() string -} +func (r *Record) readFromV2(m *apiacl.EACLRecord, checkFieldPresence bool) error { + var err error + if len(m.Targets) > 0 { + r.targets = make([]Target, len(m.Targets)) + for i := range m.Targets { + if m.Targets[i] != nil { + err = r.targets[i].readFromV2(m.Targets[i], checkFieldPresence) + if err != nil { + return fmt.Errorf("invalid target #%d: %w", i, err) + } + } + } + } else if checkFieldPresence { + return errors.New("missing target subjects") + } -func (r *Record) addFilter(from FilterHeaderType, m Match, key string, val stringEncoder) { - filter := Filter{ - from: from, - key: key, - matcher: m, - value: val, + if m.Filters != nil { + r.filters = make([]Filter, len(m.Filters)) + for i := range m.Filters { + if m.Filters[i] != nil { + err = r.filters[i].readFromV2(m.Filters[i], checkFieldPresence) + if err != nil { + return fmt.Errorf("invalid filter #%d: %w", i, err) + } + } + } + } else { + r.filters = nil } - r.filters = append(r.filters, filter) -} + r.action = Action(m.Action) + r.operation = acl.Op(m.Operation) -func (r *Record) addObjectFilter(m Match, key string, val stringEncoder) { - r.addFilter(HeaderFromObject, m, key, val) + return nil } -// AddFilter adds generic filter. -// -// If matcher is [MatchNotPresent], the value must be empty. If matcher is -// numeric (e.g. [MatchNumGT]), value must be a base-10 integer. -func (r *Record) AddFilter(from FilterHeaderType, matcher Match, name, value string) { - r.addFilter(from, matcher, name, staticStringer(value)) -} +// CopyTo writes deep copy of the [Record] to dst. +func (r Record) CopyTo(dst *Record) { + dst.action = r.action + dst.operation = r.operation -// AddObjectAttributeFilter adds filter by object attribute. -// -// If m is [MatchNotPresent], the value must be empty. If matcher is numeric -// (e.g. [MatchNumGT]), value must be a base-10 integer. 
-func (r *Record) AddObjectAttributeFilter(m Match, key, value string) { - r.addObjectFilter(m, key, staticStringer(value)) -} + if r.filters != nil { + dst.filters = make([]Filter, len(r.filters)) + copy(dst.filters, r.filters) + } else { + dst.filters = nil + } -// AddObjectVersionFilter adds filter by object version. -// -// The m must not be [MatchNotPresent] or numeric (e.g. [MatchNumGT]). -func (r *Record) AddObjectVersionFilter(m Match, v *version.Version) { - r.addObjectFilter(m, FilterObjectVersion, staticStringer(version.EncodeToString(*v))) + if r.targets != nil { + dst.targets = make([]Target, len(r.targets)) + for i := range r.targets { + r.targets[i].CopyTo(&dst.targets[i]) + } + } else { + dst.targets = nil + } } -// AddObjectIDFilter adds filter by object ID. +// Targets returns list of target subjects for which this access rule applies. // -// The m must not be [MatchNotPresent] or numeric (e.g. [MatchNumGT]). -func (r *Record) AddObjectIDFilter(m Match, id oid.ID) { - r.addObjectFilter(m, FilterObjectID, id) -} - -// AddObjectContainerIDFilter adds filter by object container ID. +// The value returned shares memory with the structure itself, so changing it can lead to data corruption. +// Make a copy if you need to change it. // -// The m must not be [MatchNotPresent] or numeric (e.g. [MatchNumGT]). -func (r *Record) AddObjectContainerIDFilter(m Match, id cid.ID) { - r.addObjectFilter(m, FilterObjectContainerID, id) +// See also [Record.SetTargets]. +func (r Record) Targets() []Target { + return r.targets } -// AddObjectOwnerIDFilter adds filter by object owner ID. +// SetTargets sets list of target subjects for which this access rule applies. // -// The m must not be [MatchNotPresent] or numeric (e.g. [MatchNumGT]). -func (r *Record) AddObjectOwnerIDFilter(m Match, id *user.ID) { - r.addObjectFilter(m, FilterObjectOwnerID, id) +// See also [Record.Targets]. 
+func (r *Record) SetTargets(targets []Target) {
+	r.targets = targets
 }
 
-// AddObjectCreationEpoch adds filter by object creation epoch.
+// Filters returns list of filters to match the requested resource to this
+// access rule. Zero rule has no filters which makes it applicable to any
+// resource.
 //
-// The m must not be [MatchNotPresent].
-func (r *Record) AddObjectCreationEpoch(m Match, epoch uint64) {
-	r.addObjectFilter(m, FilterObjectCreationEpoch, u64Stringer(epoch))
-}
-
-// AddObjectPayloadLengthFilter adds filter by object payload length.
+// The value returned shares memory with the structure itself, so changing it can lead to data corruption.
+// Make a copy if you need to change it.
 //
-// The m must not be [MatchNotPresent].
-func (r *Record) AddObjectPayloadLengthFilter(m Match, size uint64) {
-	r.addObjectFilter(m, FilterObjectPayloadSize, u64Stringer(size))
+// See also [Record.SetFilters].
+func (r Record) Filters() []Filter {
+	return r.filters
 }
 
-// AddObjectPayloadHashFilter adds filter by object payload hash value.
+// SetFilters sets list of filters to match the requested resource to this
+// access rule. Empty list applies the rule to all resources.
 //
-// The m must not be [MatchNotPresent] or numeric (e.g. [MatchNumGT]).
-func (r *Record) AddObjectPayloadHashFilter(m Match, h checksum.Checksum) {
-	r.addObjectFilter(m, FilterObjectPayloadChecksum, staticStringer(h.String()))
+// See also [Record.Filters].
+func (r *Record) SetFilters(fs []Filter) {
+	r.filters = fs
 }
 
-// AddObjectTypeFilter adds filter by object type.
+// Operation returns operation executed by the subject to match.
 //
-// The m must not be [MatchNotPresent] or numeric (e.g. [MatchNumGT]).
-func (r *Record) AddObjectTypeFilter(m Match, t object.Type) {
-	r.addObjectFilter(m, FilterObjectType, staticStringer(t.EncodeToString()))
+// See also [Record.SetOperation]. 
+func (r Record) Operation() acl.Op {
+	return r.operation
 }
 
-// AddObjectHomomorphicHashFilter adds filter by object payload homomorphic hash value.
+// SetOperation sets operation executed by the subject to match.
 //
-// The m must not be [MatchNotPresent] or numeric (e.g. [MatchNumGT]).
-func (r *Record) AddObjectHomomorphicHashFilter(m Match, h checksum.Checksum) {
-	r.addObjectFilter(m, FilterObjectPayloadHomomorphicChecksum, staticStringer(h.String()))
+// See also [Record.Operation].
+func (r *Record) SetOperation(operation acl.Op) {
+	r.operation = operation
 }
 
-// ToV2 converts Record to v2 acl.EACLRecord message.
+// Action returns action on the target subject when the access rule matches.
 //
-// Nil Record converts to nil.
-func (r *Record) ToV2() *v2acl.Record {
-	if r == nil {
-		return nil
-	}
-
-	v2 := new(v2acl.Record)
-
-	if r.targets != nil {
-		targets := make([]v2acl.Target, len(r.targets))
-		for i := range r.targets {
-			targets[i] = *r.targets[i].ToV2()
-		}
-
-		v2.SetTargets(targets)
-	}
-
-	if r.filters != nil {
-		filters := make([]v2acl.HeaderFilter, len(r.filters))
-		for i := range r.filters {
-			filters[i] = *r.filters[i].ToV2()
-		}
-
-		v2.SetFilters(filters)
-	}
-
-	v2.SetAction(r.action.ToV2())
-	v2.SetOperation(r.operation.ToV2())
-
-	return v2
+// See also [Record.SetAction].
+func (r Record) Action() Action {
+	return r.action
 }
 
-// NewRecord creates and returns blank Record instance.
+// SetAction sets action on the target subject when the access rule matches.
 //
-// Defaults:
-//   - action: ActionUnknown;
-//   - operation: OperationUnknown;
-//   - targets: nil,
-//   - filters: nil.
-func NewRecord() *Record {
-	return new(Record)
-}
-
-// CreateRecord creates, initializes with parameters and returns Record instance.
-func CreateRecord(action Action, operation Operation) *Record {
-	r := NewRecord()
+// See also [Record.Action]. 
+func (r *Record) SetAction(action Action) { r.action = action - r.operation = operation - r.targets = []Target{} - r.filters = []Filter{} - - return r -} - -// NewRecordFromV2 converts v2 acl.EACLRecord message to Record. -func NewRecordFromV2(record *v2acl.Record) *Record { - r := NewRecord() - - if record == nil { - return r - } - - r.action = ActionFromV2(record.GetAction()) - r.operation = OperationFromV2(record.GetOperation()) - - v2targets := record.GetTargets() - v2filters := record.GetFilters() - - r.targets = make([]Target, len(v2targets)) - for i := range v2targets { - r.targets[i] = *NewTargetFromV2(&v2targets[i]) - } - - r.filters = make([]Filter, len(v2filters)) - for i := range v2filters { - r.filters[i] = *NewFilterFromV2(&v2filters[i]) - } - - return r -} - -// Marshal marshals Record into a protobuf binary form. -func (r *Record) Marshal() ([]byte, error) { - return r.ToV2().StableMarshal(nil), nil -} - -// Unmarshal unmarshals protobuf binary representation of Record. -func (r *Record) Unmarshal(data []byte) error { - fV2 := new(v2acl.Record) - if err := fV2.Unmarshal(data); err != nil { - return err - } - - *r = *NewRecordFromV2(fV2) - - return nil -} - -// MarshalJSON encodes Record to protobuf JSON format. -func (r *Record) MarshalJSON() ([]byte, error) { - return r.ToV2().MarshalJSON() -} - -// UnmarshalJSON decodes Record from protobuf JSON format. -func (r *Record) UnmarshalJSON(data []byte) error { - tV2 := new(v2acl.Record) - if err := tV2.UnmarshalJSON(data); err != nil { - return err - } - - *r = *NewRecordFromV2(tV2) - - return nil -} - -// equalRecords compares Record with each other. 
-func equalRecords(r1, r2 Record) bool { - if r1.Operation() != r2.Operation() || - r1.Action() != r2.Action() { - return false - } - - fs1, fs2 := r1.Filters(), r2.Filters() - ts1, ts2 := r1.Targets(), r2.Targets() - - if len(fs1) != len(fs2) || - len(ts1) != len(ts2) { - return false - } - - for i := 0; i < len(fs1); i++ { - if !equalFilters(fs1[i], fs2[i]) { - return false - } - } - - for i := 0; i < len(ts1); i++ { - if !equalTargets(ts1[i], ts2[i]) { - return false - } - } - - return true } diff --git a/eacl/record_test.go b/eacl/record_test.go index ea2c25cc0..3c20e9a8e 100644 --- a/eacl/record_test.go +++ b/eacl/record_test.go @@ -1,319 +1,104 @@ -package eacl +package eacl_test import ( "bytes" - "crypto/ecdsa" - "fmt" "testing" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - v2acl "github.com/nspcc-dev/neofs-api-go/v2/acl" - checksumtest "github.com/nspcc-dev/neofs-sdk-go/checksum/test" - cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" - "github.com/nspcc-dev/neofs-sdk-go/object" - oidtest "github.com/nspcc-dev/neofs-sdk-go/object/id/test" - usertest "github.com/nspcc-dev/neofs-sdk-go/user/test" - versiontest "github.com/nspcc-dev/neofs-sdk-go/version/test" + "github.com/nspcc-dev/neofs-sdk-go/eacl" + eacltest "github.com/nspcc-dev/neofs-sdk-go/eacl/test" "github.com/stretchr/testify/require" ) -func TestRecord(t *testing.T) { - record := NewRecord() - record.SetOperation(OperationRange) - record.SetAction(ActionAllow) - record.AddFilter(HeaderFromRequest, MatchStringEqual, "A", "B") - record.AddFilter(HeaderFromRequest, MatchStringNotEqual, "C", "D") +func TestRecord_Action(t *testing.T) { + var r eacl.Record + require.Zero(t, r.Action()) - target := NewTarget() - target.SetRole(RoleSystem) - AddRecordTarget(record, target) - - v2 := record.ToV2() - require.NotNil(t, v2) - require.Equal(t, v2acl.OperationRange, v2.GetOperation()) - require.Equal(t, v2acl.ActionAllow, v2.GetAction()) - require.Len(t, v2.GetFilters(), len(record.Filters())) 
- require.Len(t, v2.GetTargets(), len(record.Targets())) - - newRecord := NewRecordFromV2(v2) - require.Equal(t, record, newRecord) - - t.Run("create record", func(t *testing.T) { - record := CreateRecord(ActionAllow, OperationGet) - require.Equal(t, ActionAllow, record.Action()) - require.Equal(t, OperationGet, record.Operation()) - }) - - t.Run("new from nil v2 record", func(t *testing.T) { - require.Equal(t, new(Record), NewRecordFromV2(nil)) - }) + r.SetAction(13) + require.EqualValues(t, 13, r.Action()) + r.SetAction(42) + require.EqualValues(t, 42, r.Action()) } -func TestAddFormedTarget(t *testing.T) { - items := []struct { - role Role - keys []ecdsa.PublicKey - }{ - { - role: RoleUnknown, - keys: []ecdsa.PublicKey{*randomPublicKey(t)}, - }, - { - role: RoleSystem, - keys: []ecdsa.PublicKey{}, - }, - } - - targets := make([]Target, len(items)) +func TestRecord_Operation(t *testing.T) { + var r eacl.Record + require.Zero(t, r.Operation()) - r := NewRecord() - - for i := range items { - targets[i].SetRole(items[i].role) - SetTargetECDSAKeys(&targets[i], ecdsaKeysToPtrs(items[i].keys)...) - AddFormedTarget(r, items[i].role, items[i].keys...) 
- } - - tgts := r.Targets() - require.Len(t, tgts, len(targets)) - - for _, tgt := range targets { - require.Contains(t, tgts, tgt) - } + r.SetOperation(13) + require.EqualValues(t, 13, r.Operation()) + r.SetOperation(42) + require.EqualValues(t, 42, r.Operation()) } -func TestRecord_AddFilter(t *testing.T) { - filters := []Filter{ - *newObjectFilter(MatchStringEqual, "some name", "ContainerID"), - *newObjectFilter(MatchStringNotEqual, "X-Header-Name", "X-Header-Value"), - } - - r := NewRecord() - for _, filter := range filters { - r.AddFilter(filter.From(), filter.Matcher(), filter.Key(), filter.Value()) - } - - require.Equal(t, filters, r.Filters()) +func TestRecord_Filters(t *testing.T) { + var r eacl.Record + require.Zero(t, r.Filters()) + + fs := make([]eacl.Filter, 2) + fs[0].SetKey("key1") + fs[1].SetKey("key2") + fs[0].SetValue("val1") + fs[1].SetValue("val2") + fs[0].SetAttributeType(1) + fs[1].SetAttributeType(2) + fs[0].SetMatcher(3) + fs[1].SetMatcher(4) + + r.SetFilters(fs) + require.Equal(t, fs, r.Filters()) + + fs = make([]eacl.Filter, 2) + fs[0].SetKey("key3") + fs[1].SetKey("key4") + fs[0].SetValue("val3") + fs[1].SetValue("val4") + fs[0].SetAttributeType(4) + fs[1].SetAttributeType(6) + fs[0].SetMatcher(7) + fs[1].SetMatcher(8) + + r.SetFilters(fs) + require.Equal(t, fs, r.Filters()) } -func TestRecordEncoding(t *testing.T) { - r := NewRecord() - r.SetOperation(OperationHead) - r.SetAction(ActionDeny) - r.AddObjectAttributeFilter(MatchStringEqual, "key", "value") - AddFormedTarget(r, RoleSystem, *randomPublicKey(t)) - - t.Run("binary", func(t *testing.T) { - data, err := r.Marshal() - require.NoError(t, err) - - r2 := NewRecord() - require.NoError(t, r2.Unmarshal(data)) - - require.Equal(t, r, r2) - }) - - t.Run("json", func(t *testing.T) { - data, err := r.MarshalJSON() - require.NoError(t, err) - - r2 := NewRecord() - require.NoError(t, r2.UnmarshalJSON(data)) - - require.Equal(t, r, r2) - }) -} - -func TestRecord_ToV2(t *testing.T) { - 
t.Run("nil", func(t *testing.T) { - var x *Record - - require.Nil(t, x.ToV2()) - }) +func TestRecord_Targets(t *testing.T) { + var r eacl.Record + require.Zero(t, r.Targets()) - t.Run("default values", func(t *testing.T) { - record := NewRecord() + ts := make([]eacl.Target, 2) + ts[0].SetRole(1) + ts[1].SetRole(2) + ts[0].SetPublicKeys([][]byte{[]byte("key1"), []byte("key2")}) + ts[1].SetPublicKeys([][]byte{[]byte("key3"), []byte("key4")}) - // check initial values - require.Equal(t, OperationUnknown, record.Operation()) - require.Equal(t, ActionUnknown, record.Action()) - require.Nil(t, record.Targets()) - require.Nil(t, record.Filters()) + r.SetTargets(ts) + require.Equal(t, ts, r.Targets()) - // convert to v2 message - recordV2 := record.ToV2() + ts = make([]eacl.Target, 2) + ts[0].SetRole(3) + ts[1].SetRole(4) + ts[0].SetPublicKeys([][]byte{[]byte("key5"), []byte("key6")}) + ts[1].SetPublicKeys([][]byte{[]byte("key7"), []byte("key8")}) - require.Equal(t, v2acl.OperationUnknown, recordV2.GetOperation()) - require.Equal(t, v2acl.ActionUnknown, recordV2.GetAction()) - require.Nil(t, recordV2.GetTargets()) - require.Nil(t, recordV2.GetFilters()) - }) -} - -func TestReservedRecords(t *testing.T) { - var ( - v = versiontest.Version() - oid = oidtest.ID() - cid = cidtest.ID() - ownerid = usertest.ID(t) - h = checksumtest.Checksum() - typ = new(object.Type) - ) - - testSuit := []struct { - f func(r *Record) - key string - value string - }{ - { - f: func(r *Record) { r.AddObjectAttributeFilter(MatchStringEqual, "foo", "bar") }, - key: "foo", - value: "bar", - }, - { - f: func(r *Record) { r.AddObjectVersionFilter(MatchStringEqual, &v) }, - key: v2acl.FilterObjectVersion, - value: v.String(), - }, - { - f: func(r *Record) { r.AddObjectIDFilter(MatchStringEqual, oid) }, - key: v2acl.FilterObjectID, - value: oid.EncodeToString(), - }, - { - f: func(r *Record) { r.AddObjectContainerIDFilter(MatchStringEqual, cid) }, - key: v2acl.FilterObjectContainerID, - value: 
cid.EncodeToString(), - }, - { - f: func(r *Record) { r.AddObjectOwnerIDFilter(MatchStringEqual, &ownerid) }, - key: v2acl.FilterObjectOwnerID, - value: ownerid.EncodeToString(), - }, - { - f: func(r *Record) { r.AddObjectCreationEpoch(MatchStringEqual, 100) }, - key: v2acl.FilterObjectCreationEpoch, - value: "100", - }, - { - f: func(r *Record) { r.AddObjectPayloadLengthFilter(MatchStringEqual, 5000) }, - key: v2acl.FilterObjectPayloadLength, - value: "5000", - }, - { - f: func(r *Record) { r.AddObjectPayloadHashFilter(MatchStringEqual, h) }, - key: v2acl.FilterObjectPayloadHash, - value: h.String(), - }, - { - f: func(r *Record) { r.AddObjectHomomorphicHashFilter(MatchStringEqual, h) }, - key: v2acl.FilterObjectHomomorphicHash, - value: h.String(), - }, - { - f: func(r *Record) { - require.True(t, typ.DecodeString("REGULAR")) - r.AddObjectTypeFilter(MatchStringEqual, *typ) - }, - key: v2acl.FilterObjectType, - value: "REGULAR", - }, - { - f: func(r *Record) { - require.True(t, typ.DecodeString("TOMBSTONE")) - r.AddObjectTypeFilter(MatchStringEqual, *typ) - }, - key: v2acl.FilterObjectType, - value: "TOMBSTONE", - }, - { - f: func(r *Record) { - require.True(t, typ.DecodeString("STORAGE_GROUP")) - r.AddObjectTypeFilter(MatchStringEqual, *typ) - }, - key: v2acl.FilterObjectType, - value: "STORAGE_GROUP", - }, - } - - for n, testCase := range testSuit { - desc := fmt.Sprintf("case #%d", n) - record := NewRecord() - testCase.f(record) - require.Len(t, record.Filters(), 1, desc) - f := record.Filters()[0] - require.Equal(t, f.Key(), testCase.key, desc) - require.Equal(t, f.Value(), testCase.value, desc) - } -} - -func randomPublicKey(t *testing.T) *ecdsa.PublicKey { - p, err := keys.NewPrivateKey() - require.NoError(t, err) - return &p.PrivateKey.PublicKey + r.SetTargets(ts) + require.Equal(t, ts, r.Targets()) } func TestRecord_CopyTo(t *testing.T) { - var record Record - record.action = ActionAllow - record.operation = OperationPut - 
record.AddObjectAttributeFilter(MatchStringEqual, "key", "value") - - var target Target - target.SetRole(1) - target.SetBinaryKeys([][]byte{ - {1, 2, 3}, - }) - - record.SetTargets(target) - record.AddObjectAttributeFilter(MatchStringEqual, "key", "value") - - t.Run("copy", func(t *testing.T) { - var dst Record - record.CopyTo(&dst) - - bts, err := record.Marshal() - require.NoError(t, err) - - bts2, err := dst.Marshal() - require.NoError(t, err) - - require.Equal(t, record, dst) - require.True(t, bytes.Equal(bts, bts2)) - }) - - t.Run("change filters", func(t *testing.T) { - var dst Record - record.CopyTo(&dst) - - require.Equal(t, record.filters[0].key, dst.filters[0].key) - require.Equal(t, record.filters[0].matcher, dst.filters[0].matcher) - require.Equal(t, record.filters[0].value, dst.filters[0].value) - require.Equal(t, record.filters[0].from, dst.filters[0].from) - - dst.filters[0].key = "key2" - dst.filters[0].matcher = MatchStringNotEqual - dst.filters[0].value = staticStringer("staticStringer") - dst.filters[0].from = 12345 - - require.NotEqual(t, record.filters[0].key, dst.filters[0].key) - require.NotEqual(t, record.filters[0].matcher, dst.filters[0].matcher) - require.NotEqual(t, record.filters[0].value, dst.filters[0].value) - require.NotEqual(t, record.filters[0].from, dst.filters[0].from) - }) - - t.Run("change target", func(t *testing.T) { - var dst Record - record.CopyTo(&dst) - - require.Equal(t, record.targets[0].role, dst.targets[0].role) - dst.targets[0].role = 12345 - require.NotEqual(t, record.targets[0].role, dst.targets[0].role) - - for i, key := range dst.targets[0].keys { - require.True(t, bytes.Equal(key, record.targets[0].keys[i])) - key[0] = 10 - require.False(t, bytes.Equal(key, record.targets[0].keys[i])) - } - }) + ts := eacltest.NTargets(2) + ts[0].SetPublicKeys([][]byte{[]byte("key1"), []byte("key2")}) + src := eacltest.Record() + src.SetTargets(ts) + + var dst eacl.Record + src.CopyTo(&dst) + require.Equal(t, src, dst) + + 
originKey := src.Filters()[0].Key() + src.Filters()[0].SetKey(originKey + "_extra") + require.Equal(t, originKey+"_extra", src.Filters()[0].Key()) + require.Equal(t, originKey, dst.Filters()[0].Key()) + + originPubKey := bytes.Clone(src.Targets()[0].PublicKeys()[0]) + src.Targets()[0].PublicKeys()[0][0]++ + require.Equal(t, originPubKey, dst.Targets()[0].PublicKeys()[0]) } diff --git a/eacl/table.go b/eacl/table.go index 613db68d5..f4d583f80 100644 --- a/eacl/table.go +++ b/eacl/table.go @@ -1,267 +1,234 @@ package eacl import ( - "crypto/sha256" + "errors" "fmt" - v2acl "github.com/nspcc-dev/neofs-api-go/v2/acl" - "github.com/nspcc-dev/neofs-api-go/v2/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/acl" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" cid "github.com/nspcc-dev/neofs-sdk-go/container/id" "github.com/nspcc-dev/neofs-sdk-go/version" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" ) -// Table is a group of ContainerEACL records for single container. +// Table represents NeoFS extended ACL (eACL): group of rules managing access to +// NeoFS resources in addition to the basic ACL. // -// Table is compatible with v2 acl.EACLTable message. +// Table is mutually compatible with [acl.EACLTable] message. See +// [Table.ReadFromV2] / [Table.WriteToV2] methods. type Table struct { - version version.Version - cid *cid.ID + decoded bool + + versionSet bool + version version.Version + + cnrSet bool + cnr cid.ID + records []Record } // CopyTo writes deep copy of the [Table] to dst. 
func (t Table) CopyTo(dst *Table) { - ver := t.version - dst.version = ver + dst.decoded = t.decoded + dst.version = t.version + dst.cnr, dst.cnrSet = t.cnr, t.cnrSet - if t.cid != nil { - id := *t.cid - dst.cid = &id + if t.records != nil { + dst.records = make([]Record, len(t.records)) + for i := range t.records { + t.records[i].CopyTo(&dst.records[i]) + } } else { - dst.cid = nil - } - - dst.records = make([]Record, len(t.records)) - for i := range t.records { - t.records[i].CopyTo(&dst.records[i]) + dst.records = nil } } -// CID returns identifier of the container that should use given access control rules. -func (t Table) CID() (cID cid.ID, isSet bool) { - if t.cid != nil { - cID = *t.cid - isSet = true +// LimitedContainer returns identifier of the NeoFS container to which the eACL +// scope is limited. Zero return means the eACL may be applied to any container. +// +// See also [Table.LimitToContainer]. +func (t Table) LimitedContainer() cid.ID { + if t.cnrSet { + return t.cnr } - - return + return cid.ID{} } -// SetCID sets identifier of the container that should use given access control rules. -func (t *Table) SetCID(cid cid.ID) { - t.cid = &cid -} - -// Version returns version of eACL format. -func (t Table) Version() version.Version { - return t.version -} - -// SetVersion sets version of eACL format. -func (t *Table) SetVersion(version version.Version) { - t.version = version +// LimitToContainer limits scope of the eACL to a referenced container. By +// default, the eACL is applicable to any container. +// +// See also [Table.LimitedContainer]. +func (t *Table) LimitToContainer(cnr cid.ID) { + t.cnr = cnr + t.cnrSet = true } // Records returns list of extended ACL rules. // // The value returned shares memory with the structure itself, so changing it can lead to data corruption. // Make a copy if you need to change it. +// +// See also [Table.SetRecords]. func (t Table) Records() []Record { return t.records } -// AddRecord adds single eACL rule. 
-func (t *Table) AddRecord(r *Record) { - if r != nil { - t.records = append(t.records, *r) - } +// SetRecords sets list of extended ACL rules. +// +// See also [Table.Records]. +func (t *Table) SetRecords(rs []Record) { + t.records = rs } -// ToV2 converts Table to v2 acl.EACLTable message. +// WriteToV2 writes Table to the [acl.EACLTable] message of the NeoFS API +// protocol. +// +// WriteToV2 writes current protocol version into the resulting message if +// Table hasn't been already decoded from such a message. // -// Nil Table converts to nil. -func (t *Table) ToV2() *v2acl.Table { - if t == nil { - return nil +// WriteToV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. +// +// See also [Table.ReadFromV2]. +func (t Table) WriteToV2(m *acl.EACLTable) { + if t.versionSet { + m.Version = new(refs.Version) + t.version.WriteToV2(m.Version) + } else if !t.decoded { + m.Version = new(refs.Version) + version.Current.WriteToV2(m.Version) + } else { + m.Version = nil } - - v2 := new(v2acl.Table) - var cidV2 refs.ContainerID - - if t.cid != nil { - t.cid.WriteToV2(&cidV2) - v2.SetContainerID(&cidV2) + if t.cnrSet { + m.ContainerId = new(refs.ContainerID) + t.cnr.WriteToV2(m.ContainerId) } - if t.records != nil { - records := make([]v2acl.Record, len(t.records)) + m.Records = make([]*acl.EACLRecord, len(t.records)) for i := range t.records { - records[i] = *t.records[i].ToV2() + m.Records[i] = recordToAPI(t.records[i]) } - - v2.SetRecords(records) + } else { + m.Records = nil } - - var verV2 refs.Version - t.version.WriteToV2(&verV2) - v2.SetVersion(&verV2) - - return v2 -} - -// NewTable creates, initializes and returns blank Table instance. -// -// Defaults: -// - version: version.Current(); -// - container ID: nil; -// - records: nil; -// - session token: nil; -// - signature: nil. 
-func NewTable() *Table { - t := new(Table) - t.SetVersion(version.Current()) - - return t } -// CreateTable creates, initializes with parameters and returns Table instance. -func CreateTable(cid cid.ID) *Table { - t := NewTable() - t.SetCID(cid) - - return t -} - -// NewTableFromV2 converts v2 acl.EACLTable message to Table. -func NewTableFromV2(table *v2acl.Table) *Table { - t := new(Table) - - if table == nil { - return t +func (t *Table) readFromV2(m *acl.EACLTable, checkFieldPresence bool) error { + var err error + t.cnrSet = m.ContainerId != nil + if t.cnrSet { + err = t.cnr.ReadFromV2(m.ContainerId) + if err != nil { + return fmt.Errorf("invalid container: %w", err) + } } - // set version - if v := table.GetVersion(); v != nil { - ver := version.Version{} - ver.SetMajor(v.GetMajor()) - ver.SetMinor(v.GetMinor()) - - t.SetVersion(ver) + t.versionSet = m.Version != nil + if t.versionSet { + err = t.version.ReadFromV2(m.Version) + if err != nil { + return fmt.Errorf("invalid version: %w", err) + } } - // set container id - if id := table.GetContainerID(); id != nil { - if t.cid == nil { - t.cid = new(cid.ID) + if len(m.Records) > 0 { + t.records = make([]Record, len(m.Records)) + for i := range m.Records { + if m.Records[i] != nil { + err = t.records[i].readFromV2(m.Records[i], checkFieldPresence) + if err != nil { + return fmt.Errorf("invalid record #%d: %w", i, err) + } + } } - - var h [sha256.Size]byte - - copy(h[:], id.GetValue()) - t.cid.SetSHA256(h) + } else if checkFieldPresence { + return errors.New("missing records") + } else { + t.records = nil } - // set eacl records - v2records := table.GetRecords() - t.records = make([]Record, len(v2records)) + t.decoded = true - for i := range v2records { - t.records[i] = *NewRecordFromV2(&v2records[i]) - } + return nil +} - return t +// ReadFromV2 reads Table from the [acl.EACLTable] message. Returns an error if +// the message is malformed according to the NeoFS API V2 protocol. The message +// must not be nil. 
+// +// ReadFromV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. +// +// See also [Table.WriteToV2]. +func (t *Table) ReadFromV2(m *acl.EACLTable) error { + return t.readFromV2(m, true) } -// Marshal marshals Table into a protobuf binary form. -func (t *Table) Marshal() ([]byte, error) { - return t.ToV2().StableMarshal(nil), nil +// Marshal encodes Table into a binary format of the NeoFS API protocol +// (Protocol Buffers V3 with direct field order). +// +// Marshal writes current protocol version into the resulting message if Table +// hasn't been already decoded from such a message. +// +// See also [Table.Unmarshal]. +func (t Table) Marshal() []byte { + var m acl.EACLTable + t.WriteToV2(&m) + + b := make([]byte, m.MarshaledSize()) + m.MarshalStable(b) + return b } -// SignedData returns actual payload to sign. +// SignedData returns signed data of the Table. // // See also [client.Client.ContainerSetEACL]. func (t Table) SignedData() []byte { - data, _ := t.Marshal() - return data + return t.Marshal() } -// Unmarshal unmarshals protobuf binary representation of Table. +// Unmarshal decodes Protocol Buffers V3 binary data into the Table. Returns an +// error describing a format violation of the specified fields. Unmarshal does +// not check presence of the required fields and, at the same time, checks +// format of presented fields. +// +// See also [Table.Marshal]. func (t *Table) Unmarshal(data []byte) error { - fV2 := new(v2acl.Table) - if err := fV2.Unmarshal(data); err != nil { - return err - } - - // format checks - err := checkFormat(fV2) + var m acl.EACLTable + err := proto.Unmarshal(data, &m) if err != nil { - return err + return fmt.Errorf("decode protobuf: %w", err) } - - *t = *NewTableFromV2(fV2) - - return nil + return t.readFromV2(&m, false) } -// MarshalJSON encodes Table to protobuf JSON format. 
+// MarshalJSON encodes Table into a JSON format of the NeoFS API protocol +// (Protocol Buffers V3 JSON). +// +// MarshalJSON writes current protocol version into the resulting message if +// Table hasn't been already decoded from such a message. +// +// See also [Table.UnmarshalJSON]. func (t *Table) MarshalJSON() ([]byte, error) { - return t.ToV2().MarshalJSON() + var m acl.EACLTable + t.WriteToV2(&m) + return protojson.Marshal(&m) } -// UnmarshalJSON decodes Table from protobuf JSON format. +// UnmarshalJSON decodes NeoFS API protocol JSON data into the Table (Protocol +// Buffers V3 JSON). Returns an error describing a format violation. +// UnmarshalJSON does not check presence of the required fields and, at the same +// time, checks format of presented fields. +// +// See also [Table.MarshalJSON]. func (t *Table) UnmarshalJSON(data []byte) error { - tV2 := new(v2acl.Table) - if err := tV2.UnmarshalJSON(data); err != nil { - return err - } - - err := checkFormat(tV2) - if err != nil { - return err - } - - *t = *NewTableFromV2(tV2) - - return nil -} - -// EqualTables compares Table with each other. 
-func EqualTables(t1, t2 Table) bool { - cID1, set1 := t1.CID() - cID2, set2 := t2.CID() - - if set1 != set2 || cID1 != cID2 || - !t1.Version().Equal(t2.Version()) { - return false - } - - rs1, rs2 := t1.Records(), t2.Records() - - if len(rs1) != len(rs2) { - return false - } - - for i := 0; i < len(rs1); i++ { - if !equalRecords(rs1[i], rs2[i]) { - return false - } - } - - return true -} - -func checkFormat(v2 *v2acl.Table) error { - var cID cid.ID - - cidV2 := v2.GetContainerID() - if cidV2 == nil { - return nil - } - - err := cID.ReadFromV2(*cidV2) + var m acl.EACLTable + err := protojson.Unmarshal(data, &m) if err != nil { - return fmt.Errorf("could not convert V2 container ID: %w", err) + return fmt.Errorf("decode protojson: %w", err) } - - return nil + return t.readFromV2(&m, false) } diff --git a/eacl/table_internal_test.go b/eacl/table_internal_test.go deleted file mode 100644 index c07bc8aef..000000000 --- a/eacl/table_internal_test.go +++ /dev/null @@ -1,102 +0,0 @@ -package eacl - -import ( - "bytes" - "crypto/sha256" - "testing" - - cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" - "github.com/nspcc-dev/neofs-sdk-go/version" - "github.com/stretchr/testify/require" -) - -func TestTable_CopyTo(t *testing.T) { - sha := sha256.Sum256([]byte("container id")) - id := cidtest.IDWithChecksum(sha) - - var table Table - table.SetVersion(version.Current()) - table.SetCID(id) - - var target Target - target.SetRole(1) - target.SetBinaryKeys([][]byte{ - {1, 2, 3}, - }) - - record := CreateRecord(ActionAllow, OperationPut) - record.SetTargets(target) - record.AddObjectAttributeFilter(MatchStringEqual, "key", "value") - - table.AddRecord(record) - - t.Run("copy", func(t *testing.T) { - var dst Table - table.CopyTo(&dst) - - bts, err := table.Marshal() - require.NoError(t, err) - - bts2, err := dst.Marshal() - require.NoError(t, err) - - require.Equal(t, table, dst) - require.True(t, bytes.Equal(bts, bts2)) - }) - - t.Run("change version", func(t 
*testing.T) { - var dst Table - table.CopyTo(&dst) - - require.True(t, table.Version().Equal(dst.Version())) - - var newVersion version.Version - newVersion.SetMajor(10) - newVersion.SetMinor(100) - - dst.SetVersion(newVersion) - - require.False(t, table.Version().Equal(dst.Version())) - }) - - t.Run("change cid", func(t *testing.T) { - var dst Table - table.CopyTo(&dst) - - cid1, isSet1 := table.CID() - require.True(t, isSet1) - - cid2, isSet2 := dst.CID() - require.True(t, isSet2) - - require.True(t, cid1.Equals(cid2)) - - sha = sha256.Sum256([]byte("container id 2")) - dst.SetCID(cidtest.IDWithChecksum(sha)) - - cid1, isSet1 = table.CID() - require.True(t, isSet1) - - cid2, isSet2 = dst.CID() - require.True(t, isSet2) - - require.False(t, cid1.Equals(cid2)) - }) - - t.Run("change record", func(t *testing.T) { - var dst Table - table.CopyTo(&dst) - - require.Equal(t, table.records[0].action, dst.records[0].action) - dst.records[0].SetAction(ActionDeny) - require.NotEqual(t, table.records[0].action, dst.records[0].action) - - require.Equal(t, table.records[0].operation, dst.records[0].operation) - dst.records[0].SetOperation(OperationDelete) - require.NotEqual(t, table.records[0].operation, dst.records[0].operation) - - require.Equal(t, table.records[0].targets[0].role, dst.records[0].targets[0].role) - table.records[0].targets[0].SetRole(1234) - require.NotEqual(t, table.records[0].targets[0].role, dst.records[0].targets[0].role) - }) -} diff --git a/eacl/table_test.go b/eacl/table_test.go index bd2a9ec00..63b7663f1 100644 --- a/eacl/table_test.go +++ b/eacl/table_test.go @@ -1,119 +1,341 @@ package eacl_test import ( - "crypto/sha256" "testing" - "github.com/nspcc-dev/neofs-api-go/v2/refs" + apiacl "github.com/nspcc-dev/neofs-sdk-go/api/acl" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" "github.com/nspcc-dev/neofs-sdk-go/eacl" eacltest "github.com/nspcc-dev/neofs-sdk-go/eacl/test" - 
"github.com/nspcc-dev/neofs-sdk-go/version" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" ) -func TestTable(t *testing.T) { - var v version.Version +func TestTable_Version(t *testing.T) { + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst eacl.Table + var msg apiacl.EACLTable - sha := sha256.Sum256([]byte("container id")) - id := cidtest.IDWithChecksum(sha) + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + err = proto.Unmarshal(dst.Marshal(), &msg) + require.Equal(t, &refs.Version{Major: 2, Minor: 13}, msg.Version) - v.SetMajor(3) - v.SetMinor(2) + msg.Version.Major, msg.Version.Minor = 3, 14 - table := eacl.NewTable() - table.SetVersion(v) - table.SetCID(id) - table.AddRecord(eacl.CreateRecord(eacl.ActionAllow, eacl.OperationPut)) + b, err := proto.Marshal(&msg) + require.NoError(t, err) + err = src.Unmarshal(b) + require.NoError(t, err) - v2 := table.ToV2() - require.NotNil(t, v2) - require.Equal(t, uint32(3), v2.GetVersion().GetMajor()) - require.Equal(t, uint32(2), v2.GetVersion().GetMinor()) - require.Equal(t, sha[:], v2.GetContainerID().GetValue()) - require.Len(t, v2.GetRecords(), 1) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + msg.Version = nil + err = proto.Unmarshal(dst.Marshal(), &msg) + require.Equal(t, &refs.Version{Major: 3, Minor: 14}, msg.Version) + }) + t.Run("api", func(t *testing.T) { + var src, dst eacl.Table + var msg apiacl.EACLTable - newTable := eacl.NewTableFromV2(v2) - require.Equal(t, table, newTable) + src.SetRecords(eacltest.NRecords(2)) // just to satisfy decoder - t.Run("new from nil v2 table", func(t *testing.T) { - require.Equal(t, new(eacl.Table), eacl.NewTableFromV2(nil)) - }) + src.WriteToV2(&msg) + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + err = proto.Unmarshal(dst.Marshal(), &msg) + require.Equal(t, &refs.Version{Major: 2, Minor: 13}, msg.Version) + + 
msg.Version.Major, msg.Version.Minor = 3, 14 + + b, err := proto.Marshal(&msg) + require.NoError(t, err) + err = src.Unmarshal(b) + require.NoError(t, err) + + src.WriteToV2(&msg) + err = dst.ReadFromV2(&msg) + require.NoError(t, err) + msg.Version = nil + err = proto.Unmarshal(dst.Marshal(), &msg) + require.Equal(t, &refs.Version{Major: 3, Minor: 14}, msg.Version) + }) + t.Run("json", func(t *testing.T) { + var src, dst eacl.Table + var msg apiacl.EACLTable + + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + j, err = dst.MarshalJSON() + require.NoError(t, err) + err = protojson.Unmarshal(j, &msg) + require.EqualValues(t, 2, msg.Version.Major) + require.EqualValues(t, 13, msg.Version.Minor) - t.Run("create table", func(t *testing.T) { - id := cidtest.ID() + msg.Version.Major, msg.Version.Minor = 3, 14 - table := eacl.CreateTable(id) - cID, set := table.CID() - require.True(t, set) - require.Equal(t, id, cID) - require.Equal(t, version.Current(), table.Version()) + b, err := protojson.Marshal(&msg) + require.NoError(t, err) + err = src.UnmarshalJSON(b) + require.NoError(t, err) + + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + msg.Version = nil + j, err = dst.MarshalJSON() + require.NoError(t, err) + err = protojson.Unmarshal(j, &msg) + require.EqualValues(t, 3, msg.Version.Major) + require.EqualValues(t, 14, msg.Version.Minor) + }) }) } -func TestTable_AddRecord(t *testing.T) { - records := []eacl.Record{ - *eacl.CreateRecord(eacl.ActionDeny, eacl.OperationDelete), - *eacl.CreateRecord(eacl.ActionAllow, eacl.OperationPut), - } +func TestTable_LimitToContainer(t *testing.T) { + var tbl eacl.Table - table := eacl.NewTable() - for _, record := range records { - table.AddRecord(&record) - } + require.Zero(t, tbl.LimitedContainer()) - require.Equal(t, records, table.Records()) -} + cnr := cidtest.ID() + cnrOther := cidtest.ChangeID(cnr) + + 
tbl.LimitToContainer(cnr) + require.Equal(t, cnr, tbl.LimitedContainer()) + + tbl.LimitToContainer(cnrOther) + require.Equal(t, cnrOther, tbl.LimitedContainer()) + + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst eacl.Table -func TestTableEncoding(t *testing.T) { - tab := eacltest.Table(t) + dst.LimitToContainer(cnr) - t.Run("binary", func(t *testing.T) { - data, err := tab.Marshal() - require.NoError(t, err) + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.Zero(t, dst.LimitedContainer()) - tab2 := eacl.NewTable() - require.NoError(t, tab2.Unmarshal(data)) + dst.LimitToContainer(cnrOther) + src.LimitToContainer(cnr) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.Equal(t, cnr, dst.LimitedContainer()) + }) + t.Run("api", func(t *testing.T) { + var src, dst eacl.Table + var msg apiacl.EACLTable - // FIXME: we compare v2 messages because - // Filter contains fmt.Stringer interface - require.Equal(t, tab.ToV2(), tab2.ToV2()) + src.SetRecords(eacltest.NRecords(2)) // just to satisfy decoder + + dst.LimitToContainer(cnr) + + src.WriteToV2(&msg) + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + require.Zero(t, dst.LimitedContainer()) + + dst.LimitToContainer(cnrOther) + src.LimitToContainer(cnr) + src.WriteToV2(&msg) + err = dst.ReadFromV2(&msg) + require.NoError(t, err) + require.Equal(t, cnr, dst.LimitedContainer()) + }) + t.Run("json", func(t *testing.T) { + var src, dst eacl.Table + + dst.LimitToContainer(cnr) + + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.Zero(t, dst.LimitedContainer()) + + dst.LimitToContainer(cnrOther) + src.LimitToContainer(cnr) + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.Equal(t, cnr, dst.LimitedContainer()) + }) }) +} + +func TestTable_Records(t *testing.T) { + var tbl eacl.Table + + 
require.Zero(t, tbl.Records()) - t.Run("json", func(t *testing.T) { - data, err := tab.MarshalJSON() - require.NoError(t, err) + rs := eacltest.NRecords(3) + tbl.SetRecords(rs) + require.Equal(t, rs, tbl.Records()) - tab2 := eacl.NewTable() - require.NoError(t, tab2.UnmarshalJSON(data)) + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst eacl.Table - require.Equal(t, tab.ToV2(), tab2.ToV2()) + dst.SetRecords(eacltest.NRecords(2)) + + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.Nil(t, dst.Records()) + + dst.SetRecords(eacltest.NRecords(3)) + rs := eacltest.NRecords(3) + src.SetRecords(rs) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.Equal(t, rs, dst.Records()) + }) + t.Run("api", func(t *testing.T) { + var src, dst eacl.Table + var msg apiacl.EACLTable + + dst.SetRecords(eacltest.NRecords(3)) + rs := eacltest.NRecords(3) + src.SetRecords(rs) + src.WriteToV2(&msg) + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + require.Equal(t, rs, dst.Records()) + }) + t.Run("json", func(t *testing.T) { + var src, dst eacl.Table + + dst.SetRecords(eacltest.NRecords(2)) + + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.Nil(t, dst.Records()) + + dst.SetRecords(eacltest.NRecords(3)) + rs := eacltest.NRecords(3) + src.SetRecords(rs) + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.Equal(t, rs, dst.Records()) + }) }) } -func TestTable_ToV2(t *testing.T) { - t.Run("nil", func(t *testing.T) { - var x *eacl.Table +func TestTable_CopyTo(t *testing.T) { + src := eacltest.Table() + + dst := eacltest.Table() + src.CopyTo(&dst) + require.Equal(t, src, dst) + + originAction := src.Records()[0].Action() + otherAction := originAction + 1 + src.Records()[0].SetAction(otherAction) + require.Equal(t, otherAction, src.Records()[0].Action()) + 
require.Equal(t, originAction, dst.Records()[0].Action()) +} + +func TestTable_SignedData(t *testing.T) { + tbl := eacltest.Table() + require.Equal(t, tbl.Marshal(), tbl.SignedData()) +} - require.Nil(t, x.ToV2()) +func TestTable_ReadFromV2(t *testing.T) { + t.Run("missing fields", func(t *testing.T) { + t.Run("records", func(t *testing.T) { + tbl := eacltest.Table() + tbl.SetRecords(nil) + var m apiacl.EACLTable + tbl.WriteToV2(&m) + require.ErrorContains(t, tbl.ReadFromV2(&m), "missing records") + }) }) + t.Run("invalid fields", func(t *testing.T) { + t.Run("container", func(t *testing.T) { + tbl := eacltest.Table() + var m apiacl.EACLTable + tbl.WriteToV2(&m) - t.Run("default values", func(t *testing.T) { - table := eacl.NewTable() + m.ContainerId.Value = []byte("not_a_container_ID") + require.ErrorContains(t, tbl.ReadFromV2(&m), "invalid container") + }) + t.Run("records", func(t *testing.T) { + t.Run("targets", func(t *testing.T) { + rs := eacltest.NRecords(2) + rs[1].SetTargets(eacltest.NTargets(3)) + tbl := eacltest.Table() + tbl.SetRecords(rs) + var m apiacl.EACLTable + tbl.WriteToV2(&m) - // check initial values - require.Equal(t, version.Current(), table.Version()) - require.Nil(t, table.Records()) - _, set := table.CID() - require.False(t, set) + m.Records[1].Targets[2].Role, m.Records[1].Targets[2].Keys = 0, nil + require.ErrorContains(t, tbl.ReadFromV2(&m), "invalid record #1: invalid target #2: role and public keys are not mutually exclusive") + m.Records[1].Targets[2].Role, m.Records[1].Targets[2].Keys = 1, make([][]byte, 1) + require.ErrorContains(t, tbl.ReadFromV2(&m), "invalid record #1: invalid target #2: role and public keys are not mutually exclusive") + m.Records[1].Targets = nil + require.ErrorContains(t, tbl.ReadFromV2(&m), "invalid record #1: missing target subjects") + }) + t.Run("filters", func(t *testing.T) { + rs := eacltest.NRecords(2) + rs[1].SetFilters(eacltest.NFilters(3)) + tbl := eacltest.Table() + tbl.SetRecords(rs) + var m 
apiacl.EACLTable + tbl.WriteToV2(&m) - // convert to v2 message - tableV2 := table.ToV2() + m.Records[1].Filters[2].Key = "" + require.ErrorContains(t, tbl.ReadFromV2(&m), "invalid record #1: invalid filter #2: missing key") + }) + }) + }) +} - var verV2 refs.Version - version.Current().WriteToV2(&verV2) - require.Equal(t, verV2, *tableV2.GetVersion()) - require.Nil(t, tableV2.GetRecords()) - require.Nil(t, tableV2.GetContainerID()) +func TestTable_Unmarshal(t *testing.T) { + t.Run("invalid binary", func(t *testing.T) { + var tbl eacl.Table + msg := []byte("definitely_not_protobuf") + err := tbl.Unmarshal(msg) + require.ErrorContains(t, err, "decode protobuf") + }) + t.Run("invalid fields", func(t *testing.T) { + t.Run("container", func(t *testing.T) { + tbl := eacltest.Table() + var m apiacl.EACLTable + tbl.WriteToV2(&m) + m.ContainerId.Value = []byte("not_a_container_ID") + b, err := proto.Marshal(&m) + require.NoError(t, err) + require.ErrorContains(t, tbl.Unmarshal(b), "invalid container") + }) + }) +} + +func TestTable_UnmarshalJSON(t *testing.T) { + t.Run("invalid json", func(t *testing.T) { + var tbl eacl.Table + msg := []byte("definitely_not_protojson") + err := tbl.UnmarshalJSON(msg) + require.ErrorContains(t, err, "decode protojson") + }) + t.Run("invalid fields", func(t *testing.T) { + t.Run("container", func(t *testing.T) { + tbl := eacltest.Table() + var m apiacl.EACLTable + tbl.WriteToV2(&m) + m.ContainerId.Value = []byte("not_a_container_ID") + b, err := protojson.Marshal(&m) + require.NoError(t, err) + require.ErrorContains(t, tbl.UnmarshalJSON(b), "invalid container") + }) }) } diff --git a/eacl/target.go b/eacl/target.go index bbe650393..f4201b41a 100644 --- a/eacl/target.go +++ b/eacl/target.go @@ -2,197 +2,89 @@ package eacl import ( "bytes" - "crypto/ecdsa" + "errors" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - v2acl "github.com/nspcc-dev/neofs-api-go/v2/acl" + apiacl "github.com/nspcc-dev/neofs-sdk-go/api/acl" ) -// Target is a group of 
request senders to match ContainerEACL. Defined by role enum -// and set of public keys. -// -// Target is compatible with v2 acl.EACLRecord.Target message. +// Target describes the parties that are subject to a specific access rule. type Target struct { role Role keys [][]byte } -func ecdsaKeysToPtrs(keys []ecdsa.PublicKey) []*ecdsa.PublicKey { - keysPtr := make([]*ecdsa.PublicKey, len(keys)) +// CopyTo writes deep copy of the [Target] to dst. +func (t Target) CopyTo(dst *Target) { + dst.role = t.role - for i := range keys { - keysPtr[i] = &keys[i] + if t.keys != nil { + dst.keys = make([][]byte, len(t.keys)) + for i := range t.keys { + dst.keys[i] = bytes.Clone(t.keys[i]) + } + } else { + dst.keys = nil } +} - return keysPtr +func isEmptyTarget(t Target) bool { + return t.role == 0 && len(t.keys) == 0 } -// CopyTo writes deep copy of the [Target] to dst. -func (t Target) CopyTo(dst *Target) { - dst.role = t.role +func targetToAPI(t Target) *apiacl.EACLRecord_Target { + if isEmptyTarget(t) { + return nil + } + return &apiacl.EACLRecord_Target{ + Role: apiacl.Role(t.role), + Keys: t.keys, + } +} - dst.keys = make([][]byte, len(t.keys)) - for i := range t.keys { - dst.keys[i] = bytes.Clone(t.keys[i]) +func (t *Target) readFromV2(m *apiacl.EACLRecord_Target, checkFieldPresence bool) error { + if checkFieldPresence && (m.Role == 0 == (len(m.Keys) == 0)) { + return errors.New("role and public keys are not mutually exclusive") } + + t.role = Role(m.Role) + t.keys = m.Keys + + return nil } -// BinaryKeys returns list of public keys to identify -// target subject in a binary format. +// PublicKeys returns list of public keys to identify target subjects. Overlaps +// [Target.Role]. // // Each element of the resulting slice is a serialized compressed public key. See [elliptic.MarshalCompressed]. // Use [neofsecdsa.PublicKey.Decode] to decode it into a type-specific structure. 
// // The value returned shares memory with the structure itself, so changing it can lead to data corruption. // Make a copy if you need to change it. -func (t *Target) BinaryKeys() [][]byte { +func (t Target) PublicKeys() [][]byte { return t.keys } -// SetBinaryKeys sets list of binary public keys to identify -// target subject. +// SetPublicKeys sets list of binary public keys to identify target subjects. +// Overlaps [Target.SetRole]. // // Each element of the keys parameter is a slice of bytes is a serialized compressed public key. // See [elliptic.MarshalCompressed]. -func (t *Target) SetBinaryKeys(keys [][]byte) { +func (t *Target) SetPublicKeys(keys [][]byte) { t.keys = keys } -// SetTargetECDSAKeys converts ECDSA public keys to a binary -// format and stores them in Target. -func SetTargetECDSAKeys(t *Target, pubs ...*ecdsa.PublicKey) { - binKeys := t.BinaryKeys() - ln := len(pubs) - - if cap(binKeys) >= ln { - binKeys = binKeys[:0] - } else { - binKeys = make([][]byte, 0, ln) - } - - for i := 0; i < ln; i++ { - binKeys = append(binKeys, (*keys.PublicKey)(pubs[i]).Bytes()) - } - - t.SetBinaryKeys(binKeys) -} - -// TargetECDSAKeys interprets binary public keys of Target -// as ECDSA public keys. If any key has a different format, -// the corresponding element will be nil. -func TargetECDSAKeys(t *Target) []*ecdsa.PublicKey { - binKeys := t.BinaryKeys() - ln := len(binKeys) - - pubs := make([]*ecdsa.PublicKey, ln) - - for i := 0; i < ln; i++ { - p := new(keys.PublicKey) - if p.DecodeBytes(binKeys[i]) == nil { - pubs[i] = (*ecdsa.PublicKey)(p) - } - } - - return pubs -} - -// SetRole sets target subject's role class. +// SetRole sets role to identify group of target subjects. Overlaps with +// [Target.SetPublicKeys]. +// +// See also [Target.Role]. func (t *Target) SetRole(r Role) { t.role = r } -// Role returns target subject's role class. +// Role returns role to identify group of target subjects. Overlaps with +// [Target.PublicKeys]. 
+// +// See also [Target.SetRole]. func (t Target) Role() Role { return t.role } - -// ToV2 converts Target to v2 acl.EACLRecord.Target message. -// -// Nil Target converts to nil. -func (t *Target) ToV2() *v2acl.Target { - if t == nil { - return nil - } - - target := new(v2acl.Target) - target.SetRole(t.role.ToV2()) - target.SetKeys(t.keys) - - return target -} - -// NewTarget creates, initializes and returns blank Target instance. -// -// Defaults: -// - role: RoleUnknown; -// - keys: nil. -func NewTarget() *Target { - return NewTargetFromV2(new(v2acl.Target)) -} - -// NewTargetFromV2 converts v2 acl.EACLRecord.Target message to Target. -func NewTargetFromV2(target *v2acl.Target) *Target { - if target == nil { - return new(Target) - } - - return &Target{ - role: RoleFromV2(target.GetRole()), - keys: target.GetKeys(), - } -} - -// Marshal marshals Target into a protobuf binary form. -func (t *Target) Marshal() ([]byte, error) { - return t.ToV2().StableMarshal(nil), nil -} - -// Unmarshal unmarshals protobuf binary representation of Target. -func (t *Target) Unmarshal(data []byte) error { - fV2 := new(v2acl.Target) - if err := fV2.Unmarshal(data); err != nil { - return err - } - - *t = *NewTargetFromV2(fV2) - - return nil -} - -// MarshalJSON encodes Target to protobuf JSON format. -func (t *Target) MarshalJSON() ([]byte, error) { - return t.ToV2().MarshalJSON() -} - -// UnmarshalJSON decodes Target from protobuf JSON format. -func (t *Target) UnmarshalJSON(data []byte) error { - tV2 := new(v2acl.Target) - if err := tV2.UnmarshalJSON(data); err != nil { - return err - } - - *t = *NewTargetFromV2(tV2) - - return nil -} - -// equalTargets compares Target with each other. 
-func equalTargets(t1, t2 Target) bool { - if t1.Role() != t2.Role() { - return false - } - - keys1, keys2 := t1.BinaryKeys(), t2.BinaryKeys() - - if len(keys1) != len(keys2) { - return false - } - - for i := 0; i < len(keys1); i++ { - if !bytes.Equal(keys1[i], keys2[i]) { - return false - } - } - - return true -} diff --git a/eacl/target_test.go b/eacl/target_test.go index 43bac2a87..381bc2593 100644 --- a/eacl/target_test.go +++ b/eacl/target_test.go @@ -1,122 +1,43 @@ -package eacl +package eacl_test import ( "bytes" - "crypto/ecdsa" "testing" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neofs-api-go/v2/acl" - v2acl "github.com/nspcc-dev/neofs-api-go/v2/acl" + "github.com/nspcc-dev/neofs-sdk-go/eacl" + eacltest "github.com/nspcc-dev/neofs-sdk-go/eacl/test" "github.com/stretchr/testify/require" ) -func TestTarget(t *testing.T) { - pubs := []*ecdsa.PublicKey{ - randomPublicKey(t), - randomPublicKey(t), - } +func TestTarget_Role(t *testing.T) { + var tgt eacl.Target + require.Zero(t, tgt.Role()) - target := NewTarget() - target.SetRole(RoleSystem) - SetTargetECDSAKeys(target, pubs...) 
- - v2 := target.ToV2() - require.NotNil(t, v2) - require.Equal(t, v2acl.RoleSystem, v2.GetRole()) - require.Len(t, v2.GetKeys(), len(pubs)) - for i, key := range v2.GetKeys() { - require.Equal(t, key, (*keys.PublicKey)(pubs[i]).Bytes()) - } - - newTarget := NewTargetFromV2(v2) - require.Equal(t, target, newTarget) - - t.Run("from nil v2 target", func(t *testing.T) { - require.Equal(t, new(Target), NewTargetFromV2(nil)) - }) -} - -func TestTargetEncoding(t *testing.T) { - tar := NewTarget() - tar.SetRole(RoleSystem) - SetTargetECDSAKeys(tar, randomPublicKey(t)) - - t.Run("binary", func(t *testing.T) { - data, err := tar.Marshal() - require.NoError(t, err) - - tar2 := NewTarget() - require.NoError(t, tar2.Unmarshal(data)) - - require.Equal(t, tar, tar2) - }) - - t.Run("json", func(t *testing.T) { - data, err := tar.MarshalJSON() - require.NoError(t, err) - - tar2 := NewTarget() - require.NoError(t, tar2.UnmarshalJSON(data)) - - require.Equal(t, tar, tar2) - }) + tgt.SetRole(13) + require.EqualValues(t, 13, tgt.Role()) + tgt.SetRole(42) + require.EqualValues(t, 42, tgt.Role()) } -func TestTarget_ToV2(t *testing.T) { - t.Run("nil", func(t *testing.T) { - var x *Target - - require.Nil(t, x.ToV2()) - }) - - t.Run("default values", func(t *testing.T) { - target := NewTarget() - - // check initial values - require.Equal(t, RoleUnknown, target.Role()) - require.Nil(t, target.BinaryKeys()) - - // convert to v2 message - targetV2 := target.ToV2() +func TestTarget_PublicKeys(t *testing.T) { + var tgt eacl.Target + require.Zero(t, tgt.PublicKeys()) - require.Equal(t, acl.RoleUnknown, targetV2.GetRole()) - require.Nil(t, targetV2.GetKeys()) - }) + tgt.SetPublicKeys([][]byte{[]byte("key1"), []byte("key2")}) + require.Equal(t, [][]byte{[]byte("key1"), []byte("key2")}, tgt.PublicKeys()) + tgt.SetPublicKeys([][]byte{[]byte("key3"), []byte("key4")}) + require.Equal(t, [][]byte{[]byte("key3"), []byte("key4")}, tgt.PublicKeys()) } func TestTarget_CopyTo(t *testing.T) { - var target 
Target - target.SetRole(1) - target.SetBinaryKeys([][]byte{ - {1, 2, 3}, - }) - - t.Run("copy", func(t *testing.T) { - var dst Target - target.CopyTo(&dst) - - bts, err := target.Marshal() - require.NoError(t, err) - - bts2, err := dst.Marshal() - require.NoError(t, err) - - require.Equal(t, target, dst) - require.True(t, bytes.Equal(bts, bts2)) - }) - - t.Run("change", func(t *testing.T) { - var dst Target - target.CopyTo(&dst) + src := eacltest.Target() + src.SetPublicKeys([][]byte{[]byte("key1"), []byte("key2")}) - require.Equal(t, target.role, dst.role) - dst.SetRole(2) - require.NotEqual(t, target.role, dst.role) + var dst eacl.Target + src.CopyTo(&dst) + require.Equal(t, src, dst) - require.True(t, bytes.Equal(target.keys[0], dst.keys[0])) - // change some key data - dst.keys[0][0] = 5 - require.False(t, bytes.Equal(target.keys[0], dst.keys[0])) - }) + originKey := bytes.Clone(src.PublicKeys()[0]) + src.PublicKeys()[0][0]++ + require.Equal(t, originKey, dst.PublicKeys()[0]) } diff --git a/eacl/test/benchmark_test.go b/eacl/test/benchmark_test.go deleted file mode 100644 index 6e514b3b5..000000000 --- a/eacl/test/benchmark_test.go +++ /dev/null @@ -1,117 +0,0 @@ -package eacltest - -import ( - "bytes" - "math/rand" - "testing" - - cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" - "github.com/nspcc-dev/neofs-sdk-go/eacl" - versiontest "github.com/nspcc-dev/neofs-sdk-go/version/test" - "github.com/stretchr/testify/require" -) - -func baseBenchmarkTableBinaryComparison(b *testing.B, factor int) { - t := TableN(factor) - exp, err := t.Marshal() - require.NoError(b, err) - - b.StopTimer() - b.ResetTimer() - b.StartTimer() - for i := 0; i < b.N; i++ { - got, _ := t.Marshal() - if !bytes.Equal(exp, got) { - b.Fail() - } - } -} - -func baseBenchmarkTableEqualsComparison(b *testing.B, factor int) { - t := TableN(factor) - data, err := t.Marshal() - require.NoError(b, err) - t2 := eacl.NewTable() - err = t2.Unmarshal(data) - require.NoError(b, err) - - 
b.StopTimer() - b.ResetTimer() - b.StartTimer() - for i := 0; i < b.N; i++ { - if !eacl.EqualTables(*t, *t2) { - b.Fail() - } - } -} - -func BenchmarkTableBinaryComparison(b *testing.B) { - baseBenchmarkTableBinaryComparison(b, 1) -} - -func BenchmarkTableEqualsComparison(b *testing.B) { - baseBenchmarkTableEqualsComparison(b, 1) -} - -func BenchmarkTableBinaryComparison10(b *testing.B) { - baseBenchmarkTableBinaryComparison(b, 10) -} - -func BenchmarkTableEqualsComparison10(b *testing.B) { - baseBenchmarkTableEqualsComparison(b, 10) -} - -func BenchmarkTableBinaryComparison100(b *testing.B) { - baseBenchmarkTableBinaryComparison(b, 100) -} - -func BenchmarkTableEqualsComparison100(b *testing.B) { - baseBenchmarkTableEqualsComparison(b, 100) -} - -// Target returns random eacl.Target. -func TargetN(n int) *eacl.Target { - x := eacl.NewTarget() - - x.SetRole(eacl.RoleSystem) - keys := make([][]byte, n) - - for i := 0; i < n; i++ { - keys[i] = make([]byte, 32) - //nolint:staticcheck - rand.Read(keys[i]) - } - - x.SetBinaryKeys(keys) - - return x -} - -// Record returns random eacl.Record. 
-func RecordN(n int) *eacl.Record { - x := eacl.NewRecord() - - x.SetAction(eacl.ActionAllow) - x.SetOperation(eacl.OperationRangeHash) - x.SetTargets(*TargetN(n)) - - for i := 0; i < n; i++ { - x.AddFilter(eacl.HeaderFromObject, eacl.MatchStringEqual, "", cidtest.ID().EncodeToString()) - } - - return x -} - -func TableN(n int) *eacl.Table { - x := eacl.NewTable() - - x.SetCID(cidtest.ID()) - - for i := 0; i < n; i++ { - x.AddRecord(RecordN(n)) - } - - x.SetVersion(versiontest.Version()) - - return x -} diff --git a/eacl/test/generate.go b/eacl/test/generate.go index b536b30cf..243230325 100644 --- a/eacl/test/generate.go +++ b/eacl/test/generate.go @@ -1,50 +1,89 @@ package eacltest import ( - "testing" + "fmt" + "math/rand" + "strconv" + "github.com/nspcc-dev/neofs-sdk-go/container/acl" cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" "github.com/nspcc-dev/neofs-sdk-go/eacl" - usertest "github.com/nspcc-dev/neofs-sdk-go/user/test" - versiontest "github.com/nspcc-dev/neofs-sdk-go/version/test" ) -// Target returns random eacl.Target. +// Target returns random [eacl.Target]. func Target() eacl.Target { - x := eacl.NewTarget() + var x eacl.Target + if rand.Int()%2 == 0 { + ks := make([][]byte, 3) + for j := range ks { + ks[j] = make([]byte, 33) + rand.Read(ks[j]) + } + x.SetPublicKeys(ks) + } else { + x.SetRole(eacl.Role(rand.Int())) + } + return x +} - x.SetRole(eacl.RoleSystem) - x.SetBinaryKeys([][]byte{ - {1, 2, 3}, - {4, 5, 6}, - }) +// NTargets returns n random [eacl.Target] instances. +func NTargets(n int) []eacl.Target { + res := make([]eacl.Target, n) + for i := range res { + res[i] = Target() + } + return res +} - return *x +// Filter returns random [eacl.Filter]. 
+func Filter() eacl.Filter { + var x eacl.Filter + x.SetKey("key_" + strconv.Itoa(rand.Int())) + x.SetValue("val_" + strconv.Itoa(rand.Int())) + x.SetAttributeType(eacl.AttributeType(rand.Int())) + x.SetMatcher(eacl.Match(rand.Int())) + return x } -// Record returns random eacl.Record. -func Record(tb testing.TB) eacl.Record { - x := eacl.NewRecord() +// NFilters returns n random [eacl.Filter] instances. +func NFilters(n int) []eacl.Filter { + res := make([]eacl.Filter, n) + for i := range res { + res[i] = Filter() + } + return res +} - x.SetAction(eacl.ActionAllow) - x.SetOperation(eacl.OperationRangeHash) - x.SetTargets(Target(), Target()) - x.AddObjectContainerIDFilter(eacl.MatchStringEqual, cidtest.ID()) - usr := usertest.ID(tb) - x.AddObjectOwnerIDFilter(eacl.MatchStringNotEqual, &usr) +// Record returns random [eacl.Record]. +func Record() eacl.Record { + var x eacl.Record + x.SetAction(eacl.Action(rand.Int())) + x.SetOperation(acl.Op(rand.Int())) + x.SetTargets(NTargets(1 + rand.Int()%3)) + if n := rand.Int() % 4; n > 0 { + x.SetFilters(NFilters(n)) + } + return x +} - return *x +// NRecords returns n random [eacl.Record] instances. +func NRecords(n int) []eacl.Record { + res := make([]eacl.Record, n) + for i := range res { + res[i] = Record() + } + return res } -func Table(tb testing.TB) eacl.Table { - x := eacl.NewTable() +// Table returns random [eacl.Table]. 
+func Table() eacl.Table { + var x eacl.Table + x.LimitToContainer(cidtest.ID()) + x.SetRecords(NRecords(1 + rand.Int()%3)) - x.SetCID(cidtest.ID()) - r1 := Record(tb) - x.AddRecord(&r1) - r2 := Record(tb) - x.AddRecord(&r2) - x.SetVersion(versiontest.Version()) + if err := x.Unmarshal(x.Marshal()); err != nil { // to fill utility fields + panic(fmt.Errorf("unexpected eACL encode-decode failure: %w", err)) + } - return *x + return x } diff --git a/eacl/test/generate_test.go b/eacl/test/generate_test.go new file mode 100644 index 000000000..4d1d54060 --- /dev/null +++ b/eacl/test/generate_test.go @@ -0,0 +1,55 @@ +package eacltest_test + +import ( + "testing" + + "github.com/nspcc-dev/neofs-sdk-go/api/acl" + "github.com/nspcc-dev/neofs-sdk-go/eacl" + eacltest "github.com/nspcc-dev/neofs-sdk-go/eacl/test" + "github.com/stretchr/testify/require" +) + +func TestFilter(t *testing.T) { + require.NotEqual(t, eacltest.Filter(), eacltest.Filter()) +} + +func TestNFilters(t *testing.T) { + res := eacltest.NFilters(3) + require.Len(t, res, 3) + require.NotEqual(t, res, eacltest.NFilters(3)) +} + +func TestTarget(t *testing.T) { + require.NotEqual(t, eacltest.Target(), eacltest.Target()) +} + +func TestNTargets(t *testing.T) { + res := eacltest.NTargets(3) + require.Len(t, res, 3) + require.NotEqual(t, res, eacltest.NTargets(3)) +} + +func TestRecord(t *testing.T) { + require.NotEqual(t, eacltest.Record(), eacltest.Record()) +} + +func TestNRecords(t *testing.T) { + res := eacltest.NRecords(3) + require.Len(t, res, 3) + require.NotEqual(t, res, eacltest.NRecords(3)) +} + +func TestTable(t *testing.T) { + tbl := eacltest.Table() + require.NotEqual(t, tbl, eacltest.Table()) + + var tbl2 eacl.Table + require.NoError(t, tbl2.Unmarshal(tbl.Marshal())) + require.Equal(t, tbl, tbl2) + + var m acl.EACLTable + tbl.WriteToV2(&m) + var tbl3 eacl.Table + require.NoError(t, tbl3.ReadFromV2(&m)) + require.Equal(t, tbl, tbl3) +} diff --git a/eacl/types.go b/eacl/types.go index 
ef045821f..6ad78db9d 100644 --- a/eacl/types.go +++ b/eacl/types.go @@ -1,6 +1,7 @@ package eacl import ( + "github.com/nspcc-dev/neofs-sdk-go/container/acl" cid "github.com/nspcc-dev/neofs-sdk-go/container/id" ) @@ -18,7 +19,7 @@ type TypedHeaderSource interface { // // It returns any problem encountered through the boolean // false value. - HeadersOfType(FilterHeaderType) ([]Header, bool) + HeadersOfType(AttributeType) ([]Header, bool) } // ValidationUnit represents unit of check for Validator. @@ -27,7 +28,7 @@ type ValidationUnit struct { role Role - op Operation + op acl.Op hdrSrc TypedHeaderSource @@ -55,7 +56,7 @@ func (u *ValidationUnit) WithRole(v Role) *ValidationUnit { } // WithOperation configures ValidationUnit to use v as request's operation. -func (u *ValidationUnit) WithOperation(v Operation) *ValidationUnit { +func (u *ValidationUnit) WithOperation(v acl.Op) *ValidationUnit { if u != nil { u.op = v } diff --git a/eacl/validator.go b/eacl/validator.go index 7387ada06..1395d6bf3 100644 --- a/eacl/validator.go +++ b/eacl/validator.go @@ -64,7 +64,7 @@ func matchFilters(hdrSrc TypedHeaderSource, filters []Filter) int { nextFilter: for _, filter := range filters { - headers, ok := hdrSrc.HeadersOfType(filter.From()) + headers, ok := hdrSrc.HeadersOfType(filter.AttributeType()) if !ok { return -1 } @@ -150,7 +150,7 @@ func targetMatches(unit *ValidationUnit, record *Record) bool { } // check public key match - if pubs := target.BinaryKeys(); len(pubs) != 0 { + if pubs := target.PublicKeys(); len(pubs) != 0 { for _, key := range pubs { if bytes.Equal(key, unit.key) { return true diff --git a/eacl/validator_test.go b/eacl/validator_test.go index 019b383ed..c23afc5b8 100644 --- a/eacl/validator_test.go +++ b/eacl/validator_test.go @@ -4,6 +4,7 @@ import ( "math/rand" "testing" + "github.com/nspcc-dev/neofs-sdk-go/container/acl" "github.com/stretchr/testify/require" ) @@ -26,24 +27,39 @@ func checkDefaultAction(t *testing.T, v *Validator, vu *ValidationUnit, 
msgAndAr } func TestFilterMatch(t *testing.T) { - tgt := *NewTarget() + var tgt Target tgt.SetRole(RoleOthers) t.Run("simple header match", func(t *testing.T) { - tb := NewTable() + var f Filter + f.SetAttributeType(AttributeObject) + f.SetKey("a") + f.SetMatcher(MatchStringEqual) + f.SetValue("xxx") - r := newRecord(ActionDeny, OperationUnknown, tgt) - r.AddFilter(HeaderFromObject, MatchStringEqual, "a", "xxx") - tb.AddRecord(r) + var r1 Record + r1.SetAction(ActionDeny) + r1.SetTargets([]Target{tgt}) + r1.SetFilters([]Filter{f}) - r = newRecord(ActionDeny, OperationUnknown, tgt) - r.AddFilter(HeaderFromRequest, MatchStringNotEqual, "b", "yyy") - tb.AddRecord(r) + f.SetAttributeType(AttributeAPIRequest) + f.SetKey("b") + f.SetMatcher(MatchStringNotEqual) + f.SetValue("yyy") - tb.AddRecord(newRecord(ActionAllow, OperationUnknown, tgt)) + var r2 Record + r1.CopyTo(&r2) + r2.SetFilters([]Filter{f}) + + var r3 Record + r3.SetAction(ActionAllow) + r3.SetTargets([]Target{tgt}) + + var tb Table + tb.SetRecords([]Record{r1, r2, r3}) v := NewValidator() - vu := newValidationUnit(RoleOthers, nil, tb) + vu := newValidationUnit(RoleOthers, nil, &tb) hs := headers{} vu.hdrSrc = &hs @@ -64,15 +80,30 @@ func TestFilterMatch(t *testing.T) { }) t.Run("all filters must match", func(t *testing.T) { - tb := NewTable() - r := newRecord(ActionDeny, OperationUnknown, tgt) - r.AddFilter(HeaderFromObject, MatchStringEqual, "a", "xxx") - r.AddFilter(HeaderFromRequest, MatchStringEqual, "b", "yyy") - tb.AddRecord(r) - tb.AddRecord(newRecord(ActionAllow, OperationUnknown, tgt)) + fs := make([]Filter, 2) + fs[0].SetAttributeType(AttributeObject) + fs[0].SetKey("a") + fs[0].SetMatcher(MatchStringEqual) + fs[0].SetValue("xxx") + fs[1].SetAttributeType(AttributeAPIRequest) + fs[1].SetKey("b") + fs[1].SetMatcher(MatchStringEqual) + fs[1].SetValue("yyy") + + var r1 Record + r1.SetAction(ActionDeny) + r1.SetTargets([]Target{tgt}) + r1.SetFilters(fs) + + var r2 Record + r2.SetAction(ActionAllow) + 
r2.SetTargets([]Target{tgt}) + + var tb Table + tb.SetRecords([]Record{r1, r2}) v := NewValidator() - vu := newValidationUnit(RoleOthers, nil, tb) + vu := newValidationUnit(RoleOthers, nil, &tb) hs := headers{} vu.hdrSrc = &hs @@ -87,19 +118,35 @@ func TestFilterMatch(t *testing.T) { }) t.Run("filters with unknown type are skipped", func(t *testing.T) { - tb := NewTable() - r := newRecord(ActionDeny, OperationUnknown, tgt) - r.AddFilter(HeaderTypeUnknown, MatchStringEqual, "a", "xxx") - tb.AddRecord(r) + var f Filter + f.SetAttributeType(0) + f.SetKey("a") + f.SetMatcher(MatchStringEqual) + f.SetValue("xxx") + + var r1 Record + r1.SetAction(ActionDeny) + r1.SetTargets([]Target{tgt}) + r1.SetFilters([]Filter{f}) + + f.SetAttributeType(0xFF) + f.SetKey("b") + f.SetValue("yyy") - r = newRecord(ActionDeny, OperationUnknown, tgt) - r.AddFilter(0xFF, MatchStringEqual, "b", "yyy") - tb.AddRecord(r) + var r2 Record + r2.SetAction(ActionDeny) + r2.SetTargets([]Target{tgt}) + r2.SetFilters([]Filter{f}) - tb.AddRecord(newRecord(ActionDeny, OperationUnknown, tgt)) + var r3 Record + r2.SetAction(ActionDeny) + r2.SetTargets([]Target{tgt}) + + var tb Table + tb.SetRecords([]Record{r1, r2, r3}) v := NewValidator() - vu := newValidationUnit(RoleOthers, nil, tb) + vu := newValidationUnit(RoleOthers, nil, &tb) hs := headers{} vu.hdrSrc = &hs @@ -114,14 +161,26 @@ func TestFilterMatch(t *testing.T) { }) t.Run("filters with match function are skipped", func(t *testing.T) { - tb := NewTable() - r := newRecord(ActionAllow, OperationUnknown, tgt) - r.AddFilter(HeaderFromObject, 0xFF, "a", "xxx") - tb.AddRecord(r) - tb.AddRecord(newRecord(ActionDeny, OperationUnknown, tgt)) + var f Filter + f.SetAttributeType(AttributeObject) + f.SetKey("a") + f.SetMatcher(0xFF) + f.SetValue("xxx") + + var r1 Record + r1.SetAction(ActionAllow) + r1.SetTargets([]Target{tgt}) + r1.SetFilters([]Filter{f}) + + var r2 Record + r2.SetAction(ActionDeny) + r2.SetTargets([]Target{tgt}) + + var tb Table + 
tb.SetRecords([]Record{r1, r2}) v := NewValidator() - vu := newValidationUnit(RoleOthers, nil, tb) + vu := newValidationUnit(RoleOthers, nil, &tb) hs := headers{} vu.hdrSrc = &hs @@ -133,37 +192,51 @@ func TestFilterMatch(t *testing.T) { } func TestOperationMatch(t *testing.T) { - tgt := *NewTarget() + var tgt Target tgt.SetRole(RoleOthers) t.Run("single operation", func(t *testing.T) { - tb := NewTable() - tb.AddRecord(newRecord(ActionDeny, OperationPut, tgt)) - tb.AddRecord(newRecord(ActionAllow, OperationGet, tgt)) + var r1, r2 Record + r1.SetAction(ActionDeny) + r1.SetOperation(acl.OpObjectPut) + r1.SetTargets([]Target{tgt}) + r2.SetAction(ActionAllow) + r2.SetOperation(acl.OpObjectGet) + r2.SetTargets([]Target{tgt}) + + var tb Table + tb.SetRecords([]Record{r1, r2}) v := NewValidator() - vu := newValidationUnit(RoleOthers, nil, tb) + vu := newValidationUnit(RoleOthers, nil, &tb) - vu.op = OperationPut + vu.op = acl.OpObjectPut checkAction(t, ActionDeny, v, vu) - vu.op = OperationGet + vu.op = acl.OpObjectGet checkAction(t, ActionAllow, v, vu) }) t.Run("unknown operation", func(t *testing.T) { - tb := NewTable() - tb.AddRecord(newRecord(ActionDeny, OperationUnknown, tgt)) - tb.AddRecord(newRecord(ActionAllow, OperationGet, tgt)) + var r1, r2 Record + r1.SetAction(ActionDeny) + r1.SetOperation(0) + r1.SetTargets([]Target{tgt}) + r2.SetAction(ActionAllow) + r2.SetOperation(acl.OpObjectGet) + r2.SetTargets([]Target{tgt}) + + var tb Table + tb.SetRecords([]Record{r1, r2}) v := NewValidator() - vu := newValidationUnit(RoleOthers, nil, tb) + vu := newValidationUnit(RoleOthers, nil, &tb) // TODO discuss if both next tests should result in DENY - vu.op = OperationPut + vu.op = acl.OpObjectPut checkDefaultAction(t, v, vu) - vu.op = OperationGet + vu.op = acl.OpObjectGet checkAction(t, ActionAllow, v, vu) }) } @@ -171,52 +244,55 @@ func TestOperationMatch(t *testing.T) { func TestTargetMatches(t *testing.T) { pubs := makeKeys(t, 3) - tgt1 := NewTarget() - 
tgt1.SetBinaryKeys(pubs[0:2]) + var tgt1 Target + tgt1.SetPublicKeys(pubs[0:2]) tgt1.SetRole(RoleUser) - tgt2 := NewTarget() + var tgt2 Target tgt2.SetRole(RoleOthers) - r := NewRecord() - r.SetTargets(*tgt1, *tgt2) + var r Record + r.SetTargets([]Target{tgt1, tgt2}) u := newValidationUnit(RoleUser, pubs[0], nil) - require.True(t, targetMatches(u, r)) + require.True(t, targetMatches(u, &r)) u = newValidationUnit(RoleUser, pubs[2], nil) - require.False(t, targetMatches(u, r)) + require.False(t, targetMatches(u, &r)) - u = newValidationUnit(RoleUnknown, pubs[1], nil) - require.True(t, targetMatches(u, r)) + u = newValidationUnit(0, pubs[1], nil) + require.True(t, targetMatches(u, &r)) u = newValidationUnit(RoleOthers, pubs[2], nil) - require.True(t, targetMatches(u, r)) + require.True(t, targetMatches(u, &r)) u = newValidationUnit(RoleSystem, pubs[2], nil) - require.False(t, targetMatches(u, r)) + require.False(t, targetMatches(u, &r)) } func TestSystemRoleModificationIgnored(t *testing.T) { - tgt := *NewTarget() + var tgt Target tgt.SetRole(RoleSystem) - operations := []Operation{ - OperationPut, - OperationGet, - OperationDelete, - OperationHead, - OperationRange, - OperationRangeHash, + operations := []acl.Op{ + acl.OpObjectPut, + acl.OpObjectGet, + acl.OpObjectDelete, + acl.OpObjectHead, + acl.OpObjectRange, + acl.OpObjectHash, } - tb := NewTable() + var tb Table for _, operation := range operations { - tb.AddRecord(newRecord(ActionDeny, operation, tgt)) + var r Record + r.SetAction(ActionDeny) + r.SetOperation(operation) + r.SetTargets([]Target{tgt}) } v := NewValidator() - vu := newValidationUnit(RoleSystem, nil, tb) + vu := newValidationUnit(RoleSystem, nil, &tb) for _, operation := range operations { vu.op = operation @@ -258,25 +334,17 @@ func makeHeaders(kv ...string) []Header { return hs } -func (h headers) HeadersOfType(ht FilterHeaderType) ([]Header, bool) { +func (h headers) HeadersOfType(ht AttributeType) ([]Header, bool) { switch ht { - case 
HeaderFromRequest: + case AttributeAPIRequest: return h.req, true - case HeaderFromObject: + case AttributeObject: return h.obj, true default: return nil, false } } -func newRecord(a Action, op Operation, tgt ...Target) *Record { - r := NewRecord() - r.SetAction(a) - r.SetOperation(op) - r.SetTargets(tgt...) - return r -} - func newValidationUnit(role Role, key []byte, table *Table) *ValidationUnit { return new(ValidationUnit). WithRole(role). @@ -352,11 +420,14 @@ func TestNumericRules(t *testing.T) { {MatchNumLE, "111111111111111111111111111111", "111111111111111111111111111110", false}, {MatchNumLE, "-111111111111111111111111111110", "-111111111111111111111111111111", false}, } { - var rec Record - rec.AddObjectAttributeFilter(tc.m, "any_key", tc.f) + var f Filter + f.SetAttributeType(AttributeObject) + f.SetKey("any_key") + f.SetMatcher(tc.m) + f.SetValue(tc.f) hs := headers{obj: makeHeaders("any_key", tc.h)} - v := matchFilters(hs, rec.filters) + v := matchFilters(hs, []Filter{f}) if tc.exp { require.Zero(t, v, tc) } else { @@ -371,23 +442,29 @@ func TestAbsenceRules(t *testing.T) { "key2", "val2", )} - var r Record - - r.AddObjectAttributeFilter(MatchStringEqual, "key2", "val2") - r.AddObjectAttributeFilter(MatchNotPresent, "key1", "") - v := matchFilters(hs, r.filters) + fs := make([]Filter, 2) + fs[0].SetAttributeType(AttributeObject) + fs[0].SetKey("key2") + fs[0].SetMatcher(MatchStringEqual) + fs[0].SetValue("val2") + fs[1].SetAttributeType(AttributeObject) + fs[1].SetKey("key1") + fs[1].SetMatcher(MatchNotPresent) + fs[1].SetValue("") + + v := matchFilters(hs, fs) require.Positive(t, v) - r.filters = r.filters[:0] - r.AddObjectAttributeFilter(MatchStringEqual, "key1", "val1") - r.AddObjectAttributeFilter(MatchNotPresent, "key2", "") - v = matchFilters(hs, r.filters) + fs[0].SetKey("key1") + fs[0].SetValue("val1") + fs[1].SetKey("key2") + v = matchFilters(hs, fs) require.Positive(t, v) - r.filters = r.filters[:0] - 
r.AddObjectAttributeFilter(MatchStringEqual, "key1", "val1") - r.AddObjectAttributeFilter(MatchStringEqual, "key2", "val2") - r.AddObjectAttributeFilter(MatchNotPresent, "key3", "") - v = matchFilters(hs, r.filters) + fs = []Filter{fs[0], fs[0], fs[1]} + fs[1].SetKey("key2") + fs[1].SetValue("val2") + fs[2].SetKey("key3") + v = matchFilters(hs, fs) require.Zero(t, v) } diff --git a/internal/proto/encoding.go b/internal/proto/encoding.go new file mode 100644 index 000000000..7f8e383b0 --- /dev/null +++ b/internal/proto/encoding.go @@ -0,0 +1,192 @@ +/* +Package proto contains helper functions in addition to the ones from +[google.golang.org/protobuf/encoding/protowire]. +*/ + +package proto + +import ( + "encoding/binary" + "math" + "reflect" + + "google.golang.org/protobuf/encoding/protowire" +) + +// TODO: docs +type Message interface { + MarshaledSize() int + MarshalStable(b []byte) +} + +func SizeVarint[integer uint64 | int64 | uint32 | int32](num protowire.Number, v integer) int { + if v == 0 { + return 0 + } + return protowire.SizeTag(num) + protowire.SizeVarint(uint64(v)) +} + +func MarshalVarint[integer uint64 | int64 | uint32 | int32](b []byte, num protowire.Number, v integer) int { + if v == 0 { + return 0 + } + off := binary.PutUvarint(b, protowire.EncodeTag(num, protowire.VarintType)) + return off + binary.PutUvarint(b[off:], uint64(v)) +} + +func SizeBool(num protowire.Number, v bool) int { + return SizeVarint(num, protowire.EncodeBool(v)) +} + +func MarshalBool(b []byte, num protowire.Number, v bool) int { + return MarshalVarint(b, num, protowire.EncodeBool(v)) +} + +func SizeBytes[bytesOrString []byte | string](num protowire.Number, v bytesOrString) int { + ln := len(v) + if ln == 0 { + return 0 + } + return protowire.SizeTag(num) + protowire.SizeBytes(ln) +} + +func MarshalBytes[bytesOrString []byte | string](b []byte, num protowire.Number, v bytesOrString) int { + if len(v) == 0 { + return 0 + } + off := binary.PutUvarint(b, protowire.EncodeTag(num, 
protowire.BytesType)) + off += binary.PutUvarint(b[off:], uint64(len(v))) + return off + copy(b[off:], v) +} + +func SizeFixed32(num protowire.Number, v uint32) int { + if v == 0 { + return 0 + } + return protowire.SizeTag(num) + protowire.SizeFixed32() +} + +func MarshalFixed32(b []byte, num protowire.Number, v uint32) int { + if v == 0 { + return 0 + } + off := binary.PutUvarint(b, protowire.EncodeTag(num, protowire.Fixed32Type)) + binary.LittleEndian.PutUint32(b[off:], v) + return off + protowire.SizeFixed32() +} + +func SizeFloat32(num protowire.Number, v float32) int { + return SizeFixed32(num, math.Float32bits(v)) +} + +func MarshalFloat32(b []byte, num protowire.Number, v float32) int { + return MarshalFixed32(b, num, math.Float32bits(v)) +} + +func SizeFixed64(num protowire.Number, v uint64) int { + if v == 0 { + return 0 + } + return protowire.SizeTag(num) + protowire.SizeFixed64() +} + +func MarshalFixed64(b []byte, num protowire.Number, v uint64) int { + if v == 0 { + return 0 + } + off := binary.PutUvarint(b, protowire.EncodeTag(num, protowire.Fixed64Type)) + binary.LittleEndian.PutUint64(b[off:], v) + return off + protowire.SizeFixed64() +} + +func SizeFloat64(num protowire.Number, v float64) int { + return SizeFixed64(num, math.Float64bits(v)) +} + +func MarshalFloat64(b []byte, num protowire.Number, v float64) int { + return MarshalFixed64(b, num, math.Float64bits(v)) +} + +func SizeNested(num protowire.Number, v Message) int { + if v == nil || reflect.ValueOf(v).IsNil() { + return 0 + } + sz := v.MarshaledSize() + if sz == 0 { + return 0 + } + return protowire.SizeTag(num) + protowire.SizeBytes(sz) +} + +func MarshalNested(b []byte, num protowire.Number, v Message) int { + if v == nil || reflect.ValueOf(v).IsNil() { + return 0 + } + sz := v.MarshaledSize() + if sz == 0 { + return 0 + } + off := binary.PutUvarint(b, protowire.EncodeTag(num, protowire.BytesType)) + off += binary.PutUvarint(b[off:], uint64(sz)) + v.MarshalStable(b[off:]) + return off + 
sz +} + +func sizeRepeatedVarint(v []uint64) int { + var sz int + for i := range v { + // packed https://protobuf.dev/programming-guides/encoding/#packed + sz += protowire.SizeVarint(v[i]) + } + return sz +} + +func SizeRepeatedVarint(num protowire.Number, v []uint64) int { + if len(v) == 0 { + return 0 + } + return protowire.SizeTag(num) + protowire.SizeBytes(sizeRepeatedVarint(v)) +} + +func MarshalRepeatedVarint(b []byte, num protowire.Number, v []uint64) int { + if len(v) == 0 { + return 0 + } + off := binary.PutUvarint(b, protowire.EncodeTag(num, protowire.BytesType)) + off += binary.PutUvarint(b[off:], uint64(sizeRepeatedVarint(v))) + for i := range v { + off += binary.PutUvarint(b[off:], v[i]) + } + return off +} + +func sizeRepeatedBytes[bytesOrString []byte | string](num protowire.Number, v []bytesOrString) int { + var sz int + tagSz := protowire.SizeTag(num) + for i := range v { + // non-packed https://protobuf.dev/programming-guides/encoding/#packed + sz += tagSz + protowire.SizeBytes(len(v[i])) + } + return sz +} + +func SizeRepeatedBytes[bytesOrString []byte | string](num protowire.Number, v []bytesOrString) int { + if len(v) == 0 { + return 0 + } + return sizeRepeatedBytes(num, v) +} + +func MarshalRepeatedBytes[bytesOrString []byte | string](b []byte, num protowire.Number, v []bytesOrString) int { + if len(v) == 0 { + return 0 + } + var off int + tag := protowire.EncodeTag(num, protowire.BytesType) + for i := range v { + off += binary.PutUvarint(b[off:], tag) + off += binary.PutUvarint(b[off:], uint64(len(v[i]))) + off += copy(b[off:], v[i]) + } + return off +} diff --git a/internal/proto/encoding_test.go b/internal/proto/encoding_test.go new file mode 100644 index 000000000..0a84a7132 --- /dev/null +++ b/internal/proto/encoding_test.go @@ -0,0 +1,232 @@ +package proto_test + +import ( + "math" + "testing" + + "github.com/nspcc-dev/neofs-sdk-go/internal/proto" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/encoding/protowire" 
+) + +func TestVarint(t *testing.T) { + const anyFieldNum = 123 + require.Zero(t, proto.SizeVarint(anyFieldNum, int32(0))) + require.Zero(t, proto.MarshalVarint(nil, anyFieldNum, int32(0))) + + const v = int32(42) + sz := proto.SizeVarint(anyFieldNum, v) + b := make([]byte, sz) + require.EqualValues(t, sz, proto.MarshalVarint(b, anyFieldNum, v)) + + num, typ, tagLn := protowire.ConsumeTag(b) + require.Positive(t, tagLn, protowire.ParseError(tagLn)) + require.EqualValues(t, anyFieldNum, num) + require.EqualValues(t, protowire.VarintType, typ) + + res, _ := protowire.ConsumeVarint(b[tagLn:]) + require.EqualValues(t, v, res) +} + +func TestBool(t *testing.T) { + const anyFieldNum = 123 + require.Zero(t, proto.SizeBool(anyFieldNum, false)) + require.Zero(t, proto.MarshalBool(nil, anyFieldNum, false)) + + const v = true + sz := proto.SizeBool(anyFieldNum, v) + b := make([]byte, sz) + require.EqualValues(t, sz, proto.MarshalBool(b, anyFieldNum, v)) + + num, typ, tagLn := protowire.ConsumeTag(b) + require.Positive(t, tagLn, protowire.ParseError(tagLn)) + require.EqualValues(t, anyFieldNum, num) + require.EqualValues(t, protowire.VarintType, typ) + + res, _ := protowire.ConsumeVarint(b[tagLn:]) + require.EqualValues(t, 1, res) +} + +func TestFixed32(t *testing.T) { + const anyFieldNum = 123 + require.Zero(t, proto.SizeFixed32(anyFieldNum, 0)) + require.Zero(t, proto.MarshalFixed32(nil, anyFieldNum, 0)) + + const v = 42 + sz := proto.SizeFixed32(anyFieldNum, v) + b := make([]byte, sz) + require.EqualValues(t, sz, proto.MarshalFixed32(b, anyFieldNum, v)) + + num, typ, tagLn := protowire.ConsumeTag(b) + require.Positive(t, tagLn, protowire.ParseError(tagLn)) + require.EqualValues(t, anyFieldNum, num) + require.EqualValues(t, protowire.Fixed32Type, typ) + + res, _ := protowire.ConsumeFixed32(b[tagLn:]) + require.EqualValues(t, v, res) +} + +func TestFloat32(t *testing.T) { + const anyFieldNum = 123 + require.Zero(t, proto.SizeFloat32(anyFieldNum, 0.0)) + require.Zero(t, 
proto.MarshalFloat32(nil, anyFieldNum, 0.0)) + + const v = float32(1234.5678) + sz := proto.SizeFloat32(anyFieldNum, v) + b := make([]byte, sz) + require.EqualValues(t, sz, proto.MarshalFloat32(b, anyFieldNum, v)) + + num, typ, tagLn := protowire.ConsumeTag(b) + require.Positive(t, tagLn, protowire.ParseError(tagLn)) + require.EqualValues(t, anyFieldNum, num) + require.EqualValues(t, protowire.Fixed32Type, typ) + + res, _ := protowire.ConsumeFixed32(b[tagLn:]) + require.EqualValues(t, v, math.Float32frombits(res)) +} + +func TestFixed64(t *testing.T) { + const anyFieldNum = 123 + require.Zero(t, proto.SizeFixed64(anyFieldNum, 0)) + require.Zero(t, proto.MarshalFixed64(nil, anyFieldNum, 0)) + + const v = 42 + sz := proto.SizeFixed64(anyFieldNum, v) + b := make([]byte, sz) + require.EqualValues(t, sz, proto.MarshalFixed64(b, anyFieldNum, v)) + + num, typ, tagLn := protowire.ConsumeTag(b) + require.Positive(t, tagLn, protowire.ParseError(tagLn)) + require.EqualValues(t, anyFieldNum, num) + require.EqualValues(t, protowire.Fixed64Type, typ) + + res, _ := protowire.ConsumeFixed64(b[tagLn:]) + require.EqualValues(t, v, res) +} + +func TestFloat64(t *testing.T) { + const anyFieldNum = 123 + require.Zero(t, proto.SizeFloat64(anyFieldNum, 0.0)) + require.Zero(t, proto.MarshalFloat64(nil, anyFieldNum, 0.0)) + + const v = 1234.5678 + sz := proto.SizeFloat64(anyFieldNum, v) + b := make([]byte, sz) + require.EqualValues(t, sz, proto.MarshalFloat64(b, anyFieldNum, v)) + + num, typ, tagLn := protowire.ConsumeTag(b) + require.Positive(t, tagLn, protowire.ParseError(tagLn)) + require.EqualValues(t, anyFieldNum, num) + require.EqualValues(t, protowire.Fixed64Type, typ) + + res, _ := protowire.ConsumeFixed64(b[tagLn:]) + require.EqualValues(t, v, math.Float64frombits(res)) +} + +func TestBytes(t *testing.T) { + const anyFieldNum = 123 + require.Zero(t, proto.SizeBytes(anyFieldNum, []byte(nil))) + require.Zero(t, proto.MarshalBytes(nil, anyFieldNum, []byte(nil))) + require.Zero(t, 
proto.SizeBytes(anyFieldNum, []byte{})) + require.Zero(t, proto.MarshalBytes(nil, anyFieldNum, []byte{})) + require.Zero(t, proto.SizeBytes(anyFieldNum, "")) + require.Zero(t, proto.MarshalBytes(nil, anyFieldNum, "")) + + const v = "Hello, world!" + sz := proto.SizeBytes(anyFieldNum, v) + b := make([]byte, sz) + require.EqualValues(t, sz, proto.MarshalBytes(b, anyFieldNum, v)) + + num, typ, tagLn := protowire.ConsumeTag(b) + require.Positive(t, tagLn, protowire.ParseError(tagLn)) + require.EqualValues(t, anyFieldNum, num) + require.EqualValues(t, protowire.BytesType, typ) + + res, _ := protowire.ConsumeBytes(b[tagLn:]) + require.EqualValues(t, v, res) +} + +type nestedMessageStub string + +func (x *nestedMessageStub) MarshaledSize() int { return len(*x) } +func (x *nestedMessageStub) MarshalStable(b []byte) { copy(b, *x) } + +func TestNested(t *testing.T) { + const anyFieldNum = 123 + require.Zero(t, proto.SizeNested(anyFieldNum, nil)) + require.Zero(t, proto.MarshalNested(nil, anyFieldNum, nil)) + require.Zero(t, proto.SizeNested(anyFieldNum, (*nestedMessageStub)(nil))) + require.Zero(t, proto.MarshalNested(nil, anyFieldNum, (*nestedMessageStub)(nil))) + + v := nestedMessageStub("Hello, world!") + sz := proto.SizeNested(anyFieldNum, &v) + b := make([]byte, sz) + require.EqualValues(t, sz, proto.MarshalNested(b, anyFieldNum, &v)) + + num, typ, tagLn := protowire.ConsumeTag(b) + require.Positive(t, tagLn, protowire.ParseError(tagLn)) + require.EqualValues(t, anyFieldNum, num) + require.EqualValues(t, protowire.BytesType, typ) + + res, _ := protowire.ConsumeBytes(b[tagLn:]) + require.EqualValues(t, v, res) +} + +func TestRepeatedVarint(t *testing.T) { + const anyFieldNum = 123 + require.Zero(t, proto.SizeRepeatedVarint(anyFieldNum, nil)) + require.Zero(t, proto.MarshalRepeatedVarint(nil, anyFieldNum, nil)) + require.Zero(t, proto.SizeRepeatedVarint(anyFieldNum, []uint64{})) + require.Zero(t, proto.MarshalRepeatedVarint(nil, anyFieldNum, []uint64{})) + + v := 
[]uint64{12, 345, 0, 67890} // unlike single varint, zero must be explicitly encoded + sz := proto.SizeRepeatedVarint(anyFieldNum, v) + b := make([]byte, sz) + require.EqualValues(t, sz, proto.MarshalRepeatedVarint(b, anyFieldNum, v)) + + num, typ, tagLn := protowire.ConsumeTag(b) + require.Positive(t, tagLn, protowire.ParseError(tagLn)) + require.EqualValues(t, anyFieldNum, num) + require.EqualValues(t, protowire.BytesType, typ) + + b, _ = protowire.ConsumeBytes(b[tagLn:]) + var res []uint64 + for len(b) > 0 { + i, ln := protowire.ConsumeVarint(b) + require.Positive(t, tagLn, protowire.ParseError(ln)) + res = append(res, i) + b = b[ln:] + } + require.Equal(t, v, res) +} + +func TestRepeatedBytes(t *testing.T) { + const anyFieldNum = 123 + require.Zero(t, proto.SizeRepeatedBytes(anyFieldNum, [][]byte(nil))) + require.Zero(t, proto.MarshalRepeatedBytes(nil, anyFieldNum, [][]byte(nil))) + require.Zero(t, proto.SizeRepeatedBytes(anyFieldNum, [][]byte{})) + require.Zero(t, proto.MarshalRepeatedBytes(nil, anyFieldNum, [][]byte{})) + require.Zero(t, proto.SizeRepeatedBytes(anyFieldNum, []string(nil))) + require.Zero(t, proto.MarshalRepeatedBytes(nil, anyFieldNum, []string(nil))) + require.Zero(t, proto.SizeRepeatedBytes(anyFieldNum, []string{})) + require.Zero(t, proto.MarshalRepeatedBytes(nil, anyFieldNum, []string{})) + + v := []string{"Hello", "World", "", "Bob", "Alice"} // unlike single byte array, zero must be explicitly encoded + sz := proto.SizeRepeatedBytes(anyFieldNum, v) + b := make([]byte, sz) + require.EqualValues(t, sz, proto.MarshalRepeatedBytes(b, anyFieldNum, v)) + + var res []string + for len(b) > 0 { + num, typ, tagLn := protowire.ConsumeTag(b) + require.Positive(t, tagLn, protowire.ParseError(tagLn)) + require.EqualValues(t, anyFieldNum, num) + require.EqualValues(t, protowire.BytesType, typ) + + bs, ln := protowire.ConsumeBytes(b[tagLn:]) + require.Positive(t, tagLn, protowire.ParseError(ln)) + res = append(res, string(bs)) + b = b[tagLn+ln:] + } + 
require.Equal(t, v, res) +} diff --git a/netmap/aggregator.go b/netmap/aggregator.go index 3365fe315..e3a5421b6 100644 --- a/netmap/aggregator.go +++ b/netmap/aggregator.go @@ -56,7 +56,7 @@ var ( // capacity and price. func newWeightFunc(capNorm, priceNorm normalizer) weightFunc { return func(n NodeInfo) float64 { - return capNorm.Normalize(float64(n.capacity())) * priceNorm.Normalize(float64(n.Price())) + return capNorm.Normalize(float64(n.Capacity())) * priceNorm.Normalize(float64(n.Price())) } } diff --git a/netmap/context.go b/netmap/context.go index e007416f1..c648dd0ff 100644 --- a/netmap/context.go +++ b/netmap/context.go @@ -4,7 +4,6 @@ import ( "errors" "github.com/nspcc-dev/hrw/v2" - "github.com/nspcc-dev/neofs-api-go/v2/netmap" ) // context of a placement build process. @@ -13,10 +12,10 @@ type context struct { netMap NetMap // cache of processed filters - processedFilters map[string]*netmap.Filter + processedFilters map[string]*Filter // cache of processed selectors - processedSelectors map[string]*netmap.Selector + processedSelectors map[string]*Selector // stores results of selector processing selections map[string][]nodes @@ -52,8 +51,8 @@ var ( func newContext(nm NetMap) *context { return &context{ netMap: nm, - processedFilters: make(map[string]*netmap.Filter), - processedSelectors: make(map[string]*netmap.Selector), + processedFilters: make(map[string]*Filter), + processedSelectors: make(map[string]*Selector), selections: make(map[string][]nodes), numCache: make(map[string]uint64), @@ -81,7 +80,7 @@ func defaultWeightFunc(ns nodes) weightFunc { min := newMinAgg() for i := range ns { - mean.Add(float64(ns[i].capacity())) + mean.Add(float64(ns[i].Capacity())) min.Add(float64(ns[i].Price())) } diff --git a/netmap/example_test.go b/netmap/example_test.go index bb87378f8..93f912641 100644 --- a/netmap/example_test.go +++ b/netmap/example_test.go @@ -3,24 +3,22 @@ package netmap_test import ( "fmt" - apiGoNetmap 
"github.com/nspcc-dev/neofs-api-go/v2/netmap" + apinetmap "github.com/nspcc-dev/neofs-sdk-go/api/netmap" "github.com/nspcc-dev/neofs-sdk-go/netmap" ) // Instances can be also used to process NeoFS API V2 protocol messages with [https://github.com/nspcc-dev/neofs-api] package. func ExampleNodeInfo_marshalling() { - // import apiGoNetmap "github.com/nspcc-dev/neofs-api-go/v2/netmap" - // On the client side. var info netmap.NodeInfo - var msg apiGoNetmap.NodeInfo + var msg apinetmap.NodeInfo info.WriteToV2(&msg) // *send message* // On the server side. - _ = info.ReadFromV2(msg) + _ = info.ReadFromV2(&msg) } // When forming information about storage node to be registered the NeoFS diff --git a/netmap/filter.go b/netmap/filter.go index bd229cad6..3d50f96ba 100644 --- a/netmap/filter.go +++ b/netmap/filter.go @@ -3,8 +3,6 @@ package netmap import ( "fmt" "strconv" - - "github.com/nspcc-dev/neofs-api-go/v2/netmap" ) // mainFilterName is a name of the filter @@ -15,15 +13,15 @@ const mainFilterName = "*" func (c *context) processFilters(p PlacementPolicy) error { for i := range p.filters { if err := c.processFilter(p.filters[i], true); err != nil { - return fmt.Errorf("process filter #%d (%s): %w", i, p.filters[i].GetName(), err) + return fmt.Errorf("process filter #%d (%s): %w", i, p.filters[i].Name(), err) } } return nil } -func (c *context) processFilter(f netmap.Filter, top bool) error { - fName := f.GetName() +func (c *context) processFilter(f Filter, top bool) error { + fName := f.Name() if fName == mainFilterName { return fmt.Errorf("%w: '%s' is reserved", errInvalidFilterName, mainFilterName) } @@ -36,10 +34,10 @@ func (c *context) processFilter(f netmap.Filter, top bool) error { return errFilterNotFound } - inner := f.GetFilters() + inner := f.SubFilters() - switch op := f.GetOp(); op { - case netmap.AND, netmap.OR: + switch op := f.Op(); op { + case FilterOpAND, FilterOpOR: for i := range inner { if err := c.processFilter(inner[i], false); err != nil { return 
fmt.Errorf("process inner filter #%d: %w", i, err) @@ -53,12 +51,12 @@ func (c *context) processFilter(f netmap.Filter, top bool) error { } switch op { - case netmap.EQ, netmap.NE: - case netmap.GT, netmap.GE, netmap.LT, netmap.LE: - val := f.GetValue() + case FilterOpEQ, FilterOpNE: + case FilterOpGT, FilterOpGE, FilterOpLT, FilterOpLE: + val := f.Value() n, err := strconv.ParseUint(val, 10, 64) if err != nil { - return fmt.Errorf("%w: '%s'", errInvalidNumber, f.GetValue()) + return fmt.Errorf("%w: '%s'", errInvalidNumber, f.Value()) } c.numCache[val] = n @@ -77,46 +75,46 @@ func (c *context) processFilter(f netmap.Filter, top bool) error { // match matches f against b. It returns no errors because // filter should have been parsed during context creation // and missing node properties are considered as a regular fail. -func (c *context) match(f *netmap.Filter, b NodeInfo) bool { - switch f.GetOp() { - case netmap.AND, netmap.OR: - inner := f.GetFilters() +func (c *context) match(f *Filter, b NodeInfo) bool { + switch f.Op() { + case FilterOpAND, FilterOpOR: + inner := f.SubFilters() for i := range inner { fSub := &inner[i] - if name := inner[i].GetName(); name != "" { + if name := inner[i].Name(); name != "" { fSub = c.processedFilters[name] } ok := c.match(fSub, b) - if ok == (f.GetOp() == netmap.OR) { + if ok == (f.Op() == FilterOpOR) { return ok } } - return f.GetOp() == netmap.AND + return f.Op() == FilterOpAND default: return c.matchKeyValue(f, b) } } -func (c *context) matchKeyValue(f *netmap.Filter, b NodeInfo) bool { - switch op := f.GetOp(); op { - case netmap.EQ: - return b.Attribute(f.GetKey()) == f.GetValue() - case netmap.NE: - return b.Attribute(f.GetKey()) != f.GetValue() +func (c *context) matchKeyValue(f *Filter, b NodeInfo) bool { + switch op := f.Op(); op { + case FilterOpEQ: + return b.Attribute(f.Key()) == f.Value() + case FilterOpNE: + return b.Attribute(f.Key()) != f.Value() default: var attr uint64 - switch f.GetKey() { + switch f.Key() { 
case attrPrice: attr = b.Price() case attrCapacity: - attr = b.capacity() + attr = b.Capacity() default: var err error - attr, err = strconv.ParseUint(b.Attribute(f.GetKey()), 10, 64) + attr, err = strconv.ParseUint(b.Attribute(f.Key()), 10, 64) if err != nil { // Note: because filters are somewhat independent from nodes attributes, // We don't report an error here, and fail filter instead. @@ -125,14 +123,14 @@ func (c *context) matchKeyValue(f *netmap.Filter, b NodeInfo) bool { } switch op { - case netmap.GT: - return attr > c.numCache[f.GetValue()] - case netmap.GE: - return attr >= c.numCache[f.GetValue()] - case netmap.LT: - return attr < c.numCache[f.GetValue()] - case netmap.LE: - return attr <= c.numCache[f.GetValue()] + case FilterOpGT: + return attr > c.numCache[f.Value()] + case FilterOpGE: + return attr >= c.numCache[f.Value()] + case FilterOpLT: + return attr < c.numCache[f.Value()] + case FilterOpLE: + return attr <= c.numCache[f.Value()] default: // do nothing and return false } diff --git a/netmap/filter_test.go b/netmap/filter_test.go index fc3007379..bb057d089 100644 --- a/netmap/filter_test.go +++ b/netmap/filter_test.go @@ -4,17 +4,16 @@ import ( "errors" "testing" - "github.com/nspcc-dev/neofs-api-go/v2/netmap" "github.com/stretchr/testify/require" ) func TestContext_ProcessFilters(t *testing.T) { fs := []Filter{ - newFilter("StorageSSD", "Storage", "SSD", netmap.EQ), - newFilter("GoodRating", "Rating", "4", netmap.GE), - newFilter("Main", "", "", netmap.AND, + newFilter("StorageSSD", "Storage", "SSD", FilterOpEQ), + newFilter("GoodRating", "Rating", "4", FilterOpGE), + newFilter("Main", "", "", FilterOpAND, newFilter("StorageSSD", "", "", 0), - newFilter("", "IntField", "123", netmap.LT), + newFilter("", "IntField", "123", FilterOpLT), newFilter("GoodRating", "", "", 0)), } @@ -23,11 +22,11 @@ func TestContext_ProcessFilters(t *testing.T) { require.NoError(t, c.processFilters(p)) require.Equal(t, 3, len(c.processedFilters)) for _, f := range 
fs { - require.Equal(t, f.m, *c.processedFilters[f.m.GetName()]) + require.Equal(t, f, *c.processedFilters[f.Name()]) } - require.Equal(t, uint64(4), c.numCache[fs[1].m.GetValue()]) - require.Equal(t, uint64(123), c.numCache[fs[2].m.GetFilters()[1].GetValue()]) + require.Equal(t, uint64(4), c.numCache[fs[1].Value()]) + require.Equal(t, uint64(123), c.numCache[fs[2].SubFilters()[1].Value()]) } func TestContext_ProcessFiltersInvalid(t *testing.T) { @@ -38,24 +37,24 @@ func TestContext_ProcessFiltersInvalid(t *testing.T) { }{ { "UnnamedTop", - newFilter("", "Storage", "SSD", netmap.EQ), + newFilter("", "Storage", "SSD", FilterOpEQ), errUnnamedTopFilter, }, { "InvalidReference", - newFilter("Main", "", "", netmap.AND, + newFilter("Main", "", "", FilterOpAND, newFilter("StorageSSD", "", "", 0)), errFilterNotFound, }, { "NonEmptyKeyed", - newFilter("Main", "Storage", "SSD", netmap.EQ, + newFilter("Main", "Storage", "SSD", FilterOpEQ, newFilter("StorageSSD", "", "", 0)), errNonEmptyFilters, }, { "InvalidNumber", - newFilter("Main", "Rating", "three", netmap.GE), + newFilter("Main", "Rating", "three", FilterOpGE), errInvalidNumber, }, { @@ -65,7 +64,7 @@ func TestContext_ProcessFiltersInvalid(t *testing.T) { }, { "InvalidName", - newFilter("*", "Rating", "3", netmap.GE), + newFilter("*", "Rating", "3", FilterOpGE), errInvalidFilterName, }, } @@ -84,12 +83,12 @@ func TestFilter_MatchSimple_InvalidOp(t *testing.T) { b.SetAttribute("Rating", "4") b.SetAttribute("Country", "Germany") - f := newFilter("Main", "Rating", "5", netmap.EQ) + f := newFilter("Main", "Rating", "5", FilterOpEQ) c := newContext(NetMap{}) p := newPlacementPolicy(1, nil, nil, []Filter{f}) require.NoError(t, c.processFilters(p)) // just for the coverage - f.m.SetOp(0) - require.False(t, c.match(&f.m, b)) + f.op = 0 + require.False(t, c.match(&f, b)) } diff --git a/netmap/helper_test.go b/netmap/helper_test.go index 0eff22af2..fe2ef9652 100644 --- a/netmap/helper_test.go +++ b/netmap/helper_test.go @@ -1,20 
+1,13 @@ package netmap -import ( - "github.com/nspcc-dev/neofs-api-go/v2/netmap" -) - -func newFilter(name string, k, v string, op netmap.Operation, fs ...Filter) (f Filter) { - f.SetName(name) - f.m.SetKey(k) - f.m.SetOp(op) - f.m.SetValue(v) - inner := make([]netmap.Filter, len(fs)) - for i := range fs { - inner[i] = fs[i].m +func newFilter(name string, k, v string, op FilterOp, fs ...Filter) (f Filter) { + return Filter{ + name: name, + key: k, + op: op, + val: v, + subs: fs, } - f.m.SetFilters(inner) - return f } func newSelector(name string, attr string, count uint32, filter string, clause func(*Selector)) (s Selector) { diff --git a/netmap/netmap.go b/netmap/netmap.go index 834bcb29b..bb8849b16 100644 --- a/netmap/netmap.go +++ b/netmap/netmap.go @@ -1,11 +1,10 @@ package netmap import ( - "crypto/sha256" "fmt" "github.com/nspcc-dev/hrw/v2" - "github.com/nspcc-dev/neofs-api-go/v2/netmap" + "github.com/nspcc-dev/neofs-sdk-go/api/netmap" cid "github.com/nspcc-dev/neofs-sdk-go/container/id" oid "github.com/nspcc-dev/neofs-sdk-go/object/id" ) @@ -13,8 +12,8 @@ import ( // NetMap represents NeoFS network map. It includes information about all // storage nodes registered in NeoFS the network. // -// NetMap is mutually compatible with github.com/nspcc-dev/neofs-api-go/v2/netmap.NetMap -// message. See ReadFromV2 / WriteToV2 methods. +// NetMap is mutually compatible with [netmap.Netmap] message. See +// [NetMap.ReadFromV2] / [NetMap.WriteToV2] methods. // // Instances can be created using built-in var declaration. type NetMap struct { @@ -23,62 +22,66 @@ type NetMap struct { nodes []NodeInfo } -// ReadFromV2 reads NetMap from the netmap.NetMap message. Checks if the -// message conforms to NeoFS API V2 protocol. +// ReadFromV2 reads NetMap from the [netmap.Netmap] message. Returns an error if +// the message is malformed according to the NeoFS API V2 protocol. The message +// must not be nil. // -// See also WriteToV2. 
-func (m *NetMap) ReadFromV2(msg netmap.NetMap) error { +// ReadFromV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. +// +// See also [NetMap.WriteToV2]. +func (m *NetMap) ReadFromV2(msg *netmap.Netmap) error { var err error - nodes := msg.Nodes() - - if nodes == nil { + if len(msg.Nodes) == 0 { m.nodes = nil } else { - m.nodes = make([]NodeInfo, len(nodes)) - - for i := range nodes { - err = m.nodes[i].ReadFromV2(nodes[i]) + m.nodes = make([]NodeInfo, len(msg.Nodes)) + for i := range msg.Nodes { + err = m.nodes[i].ReadFromV2(msg.Nodes[i]) if err != nil { - return fmt.Errorf("invalid node info: %w", err) + return fmt.Errorf("invalid node info #%d: %w", i, err) } } } - m.epoch = msg.Epoch() + m.epoch = msg.Epoch return nil } -// WriteToV2 writes NetMap to the netmap.NetMap message. The message -// MUST NOT be nil. +// WriteToV2 writes NetMap to the [netmap.Netmap] message of the NeoFS API +// protocol. // -// See also ReadFromV2. -func (m NetMap) WriteToV2(msg *netmap.NetMap) { - var nodes []netmap.NodeInfo - +// WriteToV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. +// +// See also [NetMap.ReadFromV2]. +func (m NetMap) WriteToV2(msg *netmap.Netmap) { if m.nodes != nil { - nodes = make([]netmap.NodeInfo, len(m.nodes)) - + msg.Nodes = make([]*netmap.NodeInfo, len(m.nodes)) for i := range m.nodes { - m.nodes[i].WriteToV2(&nodes[i]) + if !isEmptyNodeInfo(m.nodes[i]) { + msg.Nodes[i] = new(netmap.NodeInfo) + m.nodes[i].WriteToV2(msg.Nodes[i]) + } } - - msg.SetNodes(nodes) + } else { + msg.Nodes = nil } - msg.SetEpoch(m.epoch) + msg.Epoch = m.epoch } // SetNodes sets information list about all storage nodes from the NeoFS network. // // Argument MUST NOT be mutated, make a copy first. // -// See also Nodes. +// See also [NetMap.Nodes]. 
func (m *NetMap) SetNodes(nodes []NodeInfo) { m.nodes = nodes } -// Nodes returns nodes set using SetNodes. +// Nodes returns nodes set using [NetMap.SetNodes]. // // The value returned shares memory with the structure itself, so changing it can lead to data corruption. // Make a copy if you need to change it. @@ -88,12 +91,12 @@ func (m NetMap) Nodes() []NodeInfo { // SetEpoch specifies revision number of the NetMap. // -// See also Epoch. +// See also [NetMap.Epoch]. func (m *NetMap) SetEpoch(epoch uint64) { m.epoch = epoch } -// Epoch returns epoch set using SetEpoch. +// Epoch returns epoch set using [NetMap.SetEpoch]. // // Zero NetMap has zero revision. func (m NetMap) Epoch() uint64 { @@ -149,10 +152,7 @@ func flattenNodes(ns []nodes) nodes { // object identifier can be used as pivot. Result is deterministic for // the fixed NetMap and parameters. func (m NetMap) PlacementVectors(vectors [][]NodeInfo, objectID oid.ID) ([][]NodeInfo, error) { - pivot := make([]byte, sha256.Size) - objectID.Encode(pivot) - - h := hrw.WrapBytes(pivot) + h := hrw.WrapBytes(objectID[:]) wf := defaultWeightFunc(m.nodes) result := make([][]NodeInfo, len(vectors)) @@ -172,7 +172,7 @@ func (m NetMap) PlacementVectors(vectors [][]NodeInfo, objectID oid.ID) ([][]Nod // the policy, and then selected by Selector list. Result is not deterministic and // node order in each vector may vary for call. // -// Result can be used in PlacementVectors. +// Result can be used in [NetMap.PlacementVectors]. // // The value returned shares memory with the structure itself, so changing it can lead to data corruption. // Make a copy if you need to change it. 
@@ -180,9 +180,7 @@ func (m NetMap) ContainerNodes(p PlacementPolicy, containerID cid.ID) ([][]NodeI c := newContext(m) c.setCBF(p.backupFactor) - pivot := make([]byte, sha256.Size) - containerID.Encode(pivot) - c.setPivot(pivot) + c.setPivot(containerID[:]) if err := c.processFilters(p); err != nil { return nil, err @@ -195,12 +193,12 @@ func (m NetMap) ContainerNodes(p PlacementPolicy, containerID cid.ID) ([][]NodeI result := make([][]NodeInfo, len(p.replicas)) for i := range p.replicas { - sName := p.replicas[i].GetSelector() + sName := p.replicas[i].SelectorName() if sName == "" { if len(p.selectors) == 0 { - var s netmap.Selector - s.SetCount(p.replicas[i].GetCount()) - s.SetFilter(mainFilterName) + var s Selector + s.SetNumberOfNodes(p.replicas[i].NumberOfObjects()) + s.SetFilterName(mainFilterName) nodes, err := c.getSelection(p, s) if err != nil { @@ -211,7 +209,7 @@ func (m NetMap) ContainerNodes(p PlacementPolicy, containerID cid.ID) ([][]NodeI } for i := range p.selectors { - result[i] = append(result[i], flattenNodes(c.selections[p.selectors[i].GetName()])...) + result[i] = append(result[i], flattenNodes(c.selections[p.selectors[i].Name()])...) 
} continue diff --git a/netmap/netmap_test.go b/netmap/netmap_test.go index 67af033ef..f63c737ed 100644 --- a/netmap/netmap_test.go +++ b/netmap/netmap_test.go @@ -1,33 +1,151 @@ package netmap_test import ( + "fmt" + "strconv" "testing" - v2netmap "github.com/nspcc-dev/neofs-api-go/v2/netmap" + apinetmap "github.com/nspcc-dev/neofs-sdk-go/api/netmap" "github.com/nspcc-dev/neofs-sdk-go/netmap" netmaptest "github.com/nspcc-dev/neofs-sdk-go/netmap/test" "github.com/stretchr/testify/require" ) -func TestNetMapNodes(t *testing.T) { - var nm netmap.NetMap +func TestNetMap_ReadFromV2(t *testing.T) { + t.Run("invalid fields", func(t *testing.T) { + t.Run("nodes", func(t *testing.T) { + testCases := []struct { + name string + err string + corrupt func(*apinetmap.NodeInfo) + }{ + {name: "nil public key", err: "missing public key", corrupt: func(n *apinetmap.NodeInfo) { + n.PublicKey = nil + }}, + {name: "empty public key", err: "missing public key", corrupt: func(n *apinetmap.NodeInfo) { + n.PublicKey = []byte{} + }}, + {name: "nil network endpoints", err: "missing network endpoints", corrupt: func(n *apinetmap.NodeInfo) { + n.Addresses = nil + }}, + {name: "empty network endpoints", err: "missing network endpoints", corrupt: func(n *apinetmap.NodeInfo) { + n.Addresses = []string{} + }}, + {name: "attributes/missing key", err: "invalid attribute #1: missing key", corrupt: func(n *apinetmap.NodeInfo) { + n.Attributes = []*apinetmap.NodeInfo_Attribute{ + {Key: "key_valid", Value: "any"}, + {Key: "", Value: "any"}, + } + }}, + {name: "attributes/repeated keys", err: "multiple attributes with key=k2", corrupt: func(n *apinetmap.NodeInfo) { + n.Attributes = []*apinetmap.NodeInfo_Attribute{ + {Key: "k1", Value: "any"}, + {Key: "k2", Value: "1"}, + {Key: "k3", Value: "any"}, + {Key: "k2", Value: "2"}, + } + }}, + {name: "attributes/missing value", err: "invalid attribute #1 (key2): missing value", corrupt: func(n *apinetmap.NodeInfo) { + n.Attributes = 
[]*apinetmap.NodeInfo_Attribute{ + {Key: "key1", Value: "any"}, + {Key: "key2", Value: ""}, + } + }}, + {name: "attributes/price format", err: "invalid price attribute (#1): invalid integer", corrupt: func(n *apinetmap.NodeInfo) { + n.Attributes = []*apinetmap.NodeInfo_Attribute{ + {Key: "any", Value: "any"}, + {Key: "Price", Value: "not_a_number"}, + } + }}, + {name: "attributes/capacity format", err: "invalid capacity attribute (#1): invalid integer", corrupt: func(n *apinetmap.NodeInfo) { + n.Attributes = []*apinetmap.NodeInfo_Attribute{ + {Key: "any", Value: "any"}, + {Key: "Capacity", Value: "not_a_number"}, + } + }}, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + n := netmaptest.Netmap() + n.SetNodes(netmaptest.NNodes(3)) + var m apinetmap.Netmap + + n.WriteToV2(&m) + testCase.corrupt(m.Nodes[1]) + require.ErrorContains(t, n.ReadFromV2(&m), fmt.Sprintf("invalid node info #1: %s", testCase.err)) + }) + } + }) + }) +} - require.Empty(t, nm.Nodes()) +func TestNetMap_SetNodes(t *testing.T) { + var nm netmap.NetMap - nodes := []netmap.NodeInfo{netmaptest.NodeInfo(), netmaptest.NodeInfo()} + require.Zero(t, nm.Nodes()) + nodes := netmaptest.NNodes(3) nm.SetNodes(nodes) - require.ElementsMatch(t, nodes, nm.Nodes()) + require.Equal(t, nodes, nm.Nodes()) + + nodesOther := netmaptest.NNodes(2) + nm.SetNodes(nodesOther) + require.Equal(t, nodesOther, nm.Nodes()) - nodesV2 := make([]v2netmap.NodeInfo, len(nodes)) - for i := range nodes { - nodes[i].WriteToV2(&nodesV2[i]) - } + t.Run("encoding", func(t *testing.T) { + t.Run("api", func(t *testing.T) { + var src, dst netmap.NetMap + var msg apinetmap.Netmap - var m v2netmap.NetMap - nm.WriteToV2(&m) + dst.SetNodes(nodes) - require.ElementsMatch(t, nodesV2, m.Nodes()) + src.WriteToV2(&msg) + require.Zero(t, msg.Nodes) + require.NoError(t, dst.ReadFromV2(&msg)) + require.Zero(t, dst.Nodes()) + + nodes := make([]netmap.NodeInfo, 3) + for i := range nodes { + si := strconv.Itoa(i + 
1) + nodes[i].SetPublicKey([]byte("pubkey_" + si)) + nodes[i].SetNetworkEndpoints([]string{"addr_" + si + "_1", "addr_" + si + "_2"}) + nodes[i].SetAttribute("attr_"+si+"_1", "val_"+si+"_1") + nodes[i].SetAttribute("attr_"+si+"_2", "val_"+si+"_2") + } + nodes[0].SetOnline() + nodes[1].SetOffline() + nodes[2].SetMaintenance() + + src.SetNodes(nodes) + + src.WriteToV2(&msg) + require.Equal(t, []*apinetmap.NodeInfo{ + {PublicKey: []byte("pubkey_1"), Addresses: []string{"addr_1_1", "addr_1_2"}, + Attributes: []*apinetmap.NodeInfo_Attribute{ + {Key: "attr_1_1", Value: "val_1_1"}, + {Key: "attr_1_2", Value: "val_1_2"}, + }, + State: apinetmap.NodeInfo_ONLINE}, + {PublicKey: []byte("pubkey_2"), Addresses: []string{"addr_2_1", "addr_2_2"}, + Attributes: []*apinetmap.NodeInfo_Attribute{ + {Key: "attr_2_1", Value: "val_2_1"}, + {Key: "attr_2_2", Value: "val_2_2"}, + }, + State: apinetmap.NodeInfo_OFFLINE}, + {PublicKey: []byte("pubkey_3"), Addresses: []string{"addr_3_1", "addr_3_2"}, + Attributes: []*apinetmap.NodeInfo_Attribute{ + {Key: "attr_3_1", Value: "val_3_1"}, + {Key: "attr_3_2", Value: "val_3_2"}, + }, + State: apinetmap.NodeInfo_MAINTENANCE, + }, + }, msg.Nodes) + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + require.Equal(t, nodes, dst.Nodes()) + }) + }) } func TestNetMap_SetEpoch(t *testing.T) { @@ -35,13 +153,33 @@ func TestNetMap_SetEpoch(t *testing.T) { require.Zero(t, nm.Epoch()) - const e = 158 + const epoch = 13 + nm.SetEpoch(epoch) + require.EqualValues(t, epoch, nm.Epoch()) + + const epochOther = 42 + nm.SetEpoch(epochOther) + require.EqualValues(t, epochOther, nm.Epoch()) + + t.Run("encoding", func(t *testing.T) { + t.Run("api", func(t *testing.T) { + var src, dst netmap.NetMap + var msg apinetmap.Netmap + + dst.SetEpoch(epoch) - nm.SetEpoch(e) - require.EqualValues(t, e, nm.Epoch()) + src.WriteToV2(&msg) + require.Zero(t, msg.Epoch) + require.NoError(t, dst.ReadFromV2(&msg)) + require.Zero(t, dst.Epoch()) - var m v2netmap.NetMap - 
nm.WriteToV2(&m) + src.SetEpoch(epoch) - require.EqualValues(t, e, m.Epoch()) + src.WriteToV2(&msg) + require.EqualValues(t, epoch, msg.Epoch) + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + require.EqualValues(t, epoch, dst.Epoch()) + }) + }) } diff --git a/netmap/network_info.go b/netmap/network_info.go index a01c3c4cf..3f55fe370 100644 --- a/netmap/network_info.go +++ b/netmap/network_info.go @@ -3,231 +3,214 @@ package netmap import ( "bytes" "encoding/binary" - "errors" "fmt" "math" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/nspcc-dev/neofs-api-go/v2/netmap" + "github.com/nspcc-dev/neofs-sdk-go/api/netmap" + "google.golang.org/protobuf/proto" ) // NetworkInfo groups information about the NeoFS network state. Mainly used to // describe the current state of the network. // -// NetworkInfo is mutually compatible with github.com/nspcc-dev/neofs-api-go/v2/netmap.NetworkInfo -// message. See ReadFromV2 / WriteToV2 methods. +// NetworkInfo is mutually compatible with [netmap.NetworkInfo] message. See +// [NetworkInfo.ReadFromV2] / [NetworkInfo.WriteToV2] methods. // // Instances can be created using built-in var declaration. type NetworkInfo struct { - m netmap.NetworkInfo + curEpoch uint64 + magicNumber uint64 + msPerBlock int64 + prms []*netmap.NetworkConfig_Parameter } -// reads NetworkInfo from netmap.NetworkInfo message. If checkFieldPresence is set, -// returns an error on absence of any protocol-required field. Verifies format of any -// presented field according to NeoFS API V2 protocol. 
-func (x *NetworkInfo) readFromV2(m netmap.NetworkInfo, checkFieldPresence bool) error { - c := m.GetNetworkConfig() - if checkFieldPresence && c == nil { - return errors.New("missing network config") - } - - if checkFieldPresence && c.NumberOfParameters() <= 0 { - return errors.New("missing network parameters") - } - +func (x *NetworkInfo) readFromV2(m *netmap.NetworkInfo) error { var err error - mNames := make(map[string]struct{}, c.NumberOfParameters()) - - c.IterateParameters(func(prm *netmap.NetworkParameter) bool { - name := string(prm.GetKey()) - - _, was := mNames[name] - if was { - err = fmt.Errorf("duplicated parameter name: %s", name) - return true + ps := m.GetNetworkConfig().GetParameters() + for i := range ps { + k := ps[i].GetKey() + if len(k) == 0 { + return fmt.Errorf("invalid network parameter #%d: missing name", i) + } + // further NPE are prevented by condition above + if len(ps[i].Value) == 0 { + return fmt.Errorf("invalid network parameter #%d: missing value", i) } - mNames[name] = struct{}{} - - switch name { - default: - if len(prm.GetValue()) == 0 { - err = fmt.Errorf("empty attribute value %s", name) - return true + for j := 0; j < i; j++ { + if bytes.Equal(ps[j].Key, k) { + return fmt.Errorf("multiple network parameters with name=%s", k) } - case configEigenTrustAlpha: + } + + switch { + case bytes.Equal(k, configEigenTrustAlpha): var num uint64 - num, err = decodeConfigValueUint64(prm.GetValue()) + num, err = decodeConfigValueUint64(ps[i].Value) if err == nil { - if alpha := math.Float64frombits(num); alpha < 0 && alpha > 1 { + if alpha := math.Float64frombits(num); alpha < 0 || alpha > 1 { err = fmt.Errorf("EigenTrust alpha value %0.2f is out of range [0, 1]", alpha) } } case - configAuditFee, - configStoragePrice, - configContainerFee, - configNamedContainerFee, - configEigenTrustNumberOfIterations, - configEpochDuration, - configIRCandidateFee, - configMaxObjSize, - configWithdrawalFee: - _, err = 
decodeConfigValueUint64(prm.GetValue()) - case configHomomorphicHashingDisabled, - configMaintenanceModeAllowed: - _, err = decodeConfigValueBool(prm.GetValue()) + bytes.Equal(k, configAuditFee), + bytes.Equal(k, configStoragePrice), + bytes.Equal(k, configContainerFee), + bytes.Equal(k, configNamedContainerFee), + bytes.Equal(k, configEigenTrustNumberOfIterations), + bytes.Equal(k, configEpochDuration), + bytes.Equal(k, configIRCandidateFee), + bytes.Equal(k, configMaxObjSize), + bytes.Equal(k, configWithdrawalFee): + _, err = decodeConfigValueUint64(ps[i].Value) + case bytes.Equal(k, configHomomorphicHashingDisabled), + bytes.Equal(k, configMaintenanceModeAllowed): + _, err = decodeConfigValueBool(ps[i].Value) } if err != nil { - err = fmt.Errorf("invalid %s parameter: %w", name, err) + return fmt.Errorf("invalid network parameter #%d (%s): %w", i, k, err) } - - return err != nil - }) - - if err != nil { - return err } - x.m = m + x.curEpoch = m.GetCurrentEpoch() + x.magicNumber = m.GetMagicNumber() + x.msPerBlock = m.GetMsPerBlock() + x.prms = ps return nil } -// ReadFromV2 reads NetworkInfo from the netmap.NetworkInfo message. Checks if the -// message conforms to NeoFS API V2 protocol. +// ReadFromV2 reads NetworkInfo from the [netmap.NetworkInfo] message. Returns +// an error if the message is malformed according to the NeoFS API V2 protocol. +// The message must not be nil. +// +// ReadFromV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. // -// See also WriteToV2. -func (x *NetworkInfo) ReadFromV2(m netmap.NetworkInfo) error { - return x.readFromV2(m, true) +// See also [NetworkInfo.WriteToV2]. +func (x *NetworkInfo) ReadFromV2(m *netmap.NetworkInfo) error { + return x.readFromV2(m) } -// WriteToV2 writes NetworkInfo to the netmap.NetworkInfo message. The message -// MUST NOT be nil. 
+// WriteToV2 writes NetworkInfo to the [netmap.NetworkInfo] message of the NeoFS +// API protocol. // -// See also ReadFromV2. +// WriteToV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. +// +// See also [NetworkInfo.ReadFromV2]. func (x NetworkInfo) WriteToV2(m *netmap.NetworkInfo) { - *m = x.m + if x.prms != nil { + m.NetworkConfig = &netmap.NetworkConfig{Parameters: x.prms} + } else { + m.NetworkConfig = nil + } + + m.CurrentEpoch = x.curEpoch + m.MagicNumber = x.magicNumber + m.MsPerBlock = x.msPerBlock } -// Marshal encodes NetworkInfo into a binary format of the NeoFS API protocol -// (Protocol Buffers with direct field order). +// Marshal encodes NetworkInfo into a binary format of the NeoFS API +// protocol (Protocol Buffers V3 with direct field order). // -// See also Unmarshal. +// See also [NetworkInfo.Unmarshal]. func (x NetworkInfo) Marshal() []byte { var m netmap.NetworkInfo x.WriteToV2(&m) - - return m.StableMarshal(nil) + b := make([]byte, m.MarshaledSize()) + m.MarshalStable(b) + return b } -// Unmarshal decodes NeoFS API protocol binary format into the NetworkInfo -// (Protocol Buffers with direct field order). Returns an error describing -// a format violation. +// Unmarshal decodes Protocol Buffers V3 binary data into the NetworkInfo. +// Returns an error describing a format violation of the specified fields. +// Unmarshal does not check presence of the required fields and, at the same +// time, checks format of presented fields. // -// See also Marshal. +// See also [NetworkInfo.Marshal]. func (x *NetworkInfo) Unmarshal(data []byte) error { var m netmap.NetworkInfo - - err := m.Unmarshal(data) + err := proto.Unmarshal(data, &m) if err != nil { - return err + return fmt.Errorf("decode protobuf: %w", err) } - return x.readFromV2(m, false) + return x.readFromV2(&m) } -// CurrentEpoch returns epoch set using SetCurrentEpoch. 
+// CurrentEpoch returns epoch set using [NetworkInfo.SetCurrentEpoch]. // // Zero NetworkInfo has zero current epoch. func (x NetworkInfo) CurrentEpoch() uint64 { - return x.m.GetCurrentEpoch() + return x.curEpoch } // SetCurrentEpoch sets current epoch of the NeoFS network. +// +// See also [NetworkInfo.CurrentEpoch]. func (x *NetworkInfo) SetCurrentEpoch(epoch uint64) { - x.m.SetCurrentEpoch(epoch) + x.curEpoch = epoch } -// MagicNumber returns magic number set using SetMagicNumber. +// MagicNumber returns magic number set using [NetworkInfo.SetMagicNumber]. // // Zero NetworkInfo has zero magic. func (x NetworkInfo) MagicNumber() uint64 { - return x.m.GetMagicNumber() + return x.magicNumber } // SetMagicNumber sets magic number of the NeoFS Sidechain. // -// See also MagicNumber. -func (x *NetworkInfo) SetMagicNumber(epoch uint64) { - x.m.SetMagicNumber(epoch) +// See also [NetworkInfo.MagicNumber]. +func (x *NetworkInfo) SetMagicNumber(magic uint64) { + x.magicNumber = magic } -// MsPerBlock returns network parameter set using SetMsPerBlock. +// MsPerBlock returns network parameter set using [NetworkInfo.SetMsPerBlock]. func (x NetworkInfo) MsPerBlock() int64 { - return x.m.GetMsPerBlock() + return x.msPerBlock } // SetMsPerBlock sets MillisecondsPerBlock network parameter of the NeoFS Sidechain. // -// See also MsPerBlock. +// See also [NetworkInfo.MsPerBlock]. 
func (x *NetworkInfo) SetMsPerBlock(v int64) { - x.m.SetMsPerBlock(v) + x.msPerBlock = v } -func (x *NetworkInfo) setConfig(name string, val []byte) { - c := x.m.GetNetworkConfig() - if c == nil { - c = new(netmap.NetworkConfig) - - var prm netmap.NetworkParameter - prm.SetKey([]byte(name)) - prm.SetValue(val) - - c.SetParameters(prm) - - x.m.SetNetworkConfig(c) - - return - } - - found := false - prms := make([]netmap.NetworkParameter, 0, c.NumberOfParameters()) - - c.IterateParameters(func(prm *netmap.NetworkParameter) bool { - found = bytes.Equal(prm.GetKey(), []byte(name)) - if found { - prm.SetValue(val) - } else { - prms = append(prms, *prm) +func (x *NetworkInfo) setConfig(name, val []byte) { + for i := range x.prms { + if bytes.Equal(x.prms[i].GetKey(), name) { + x.prms[i].Value = val + return } - - return found + } + x.prms = append(x.prms, &netmap.NetworkConfig_Parameter{ + Key: name, + Value: val, }) +} - if !found { - prms = append(prms, netmap.NetworkParameter{}) - prms[len(prms)-1].SetKey([]byte(name)) - prms[len(prms)-1].SetValue(val) - - c.SetParameters(prms...) +func (x *NetworkInfo) resetConfig(name []byte) { + for i := 0; i < len(x.prms); i++ { // do not use range, slice is changed inside + if bytes.Equal(x.prms[i].GetKey(), name) { + x.prms = append(x.prms[:i], x.prms[i+1:]...) + i-- + } } } -func (x NetworkInfo) configValue(name string) (res []byte) { - x.m.GetNetworkConfig().IterateParameters(func(prm *netmap.NetworkParameter) bool { - if string(prm.GetKey()) == name { - res = prm.GetValue() - - return true +func (x NetworkInfo) configValue(name []byte) []byte { + for i := range x.prms { + if bytes.Equal(x.prms[i].GetKey(), name) { + return x.prms[i].Value } - - return false - }) - - return + } + return nil } // SetRawNetworkParameter sets named NeoFS network parameter whose value is @@ -235,9 +218,10 @@ func (x NetworkInfo) configValue(name string) (res []byte) { // // Argument MUST NOT be mutated, make a copy first. 
// -// See also RawNetworkParameter, IterateRawNetworkParameters. +// See also [NetworkInfo.RawNetworkParameter], +// [NetworkInfo.IterateRawNetworkParameters]. func (x *NetworkInfo) SetRawNetworkParameter(name string, value []byte) { - x.setConfig(name, value) + x.setConfig([]byte(name), value) } // RawNetworkParameter reads raw network parameter set using [NetworkInfo.SetRawNetworkParameter] @@ -247,60 +231,64 @@ func (x *NetworkInfo) SetRawNetworkParameter(name string, value []byte) { // Make a copy if you need to change it. // // Zero NetworkInfo has no network parameters. -func (x *NetworkInfo) RawNetworkParameter(name string) []byte { - return x.configValue(name) +func (x NetworkInfo) RawNetworkParameter(name string) []byte { + return x.configValue([]byte(name)) } // IterateRawNetworkParameters iterates over all raw networks parameters set -// using SetRawNetworkParameter and passes them into f. +// using [NetworkInfo.SetRawNetworkParameter] and passes them into f. // // Handler MUST NOT be nil. Handler MUST NOT mutate value parameter. // // Zero NetworkInfo has no network parameters. 
-func (x *NetworkInfo) IterateRawNetworkParameters(f func(name string, value []byte)) { - c := x.m.GetNetworkConfig() - - c.IterateParameters(func(prm *netmap.NetworkParameter) bool { - name := string(prm.GetKey()) - switch name { +func (x NetworkInfo) IterateRawNetworkParameters(f func(name string, value []byte)) { + for i := range x.prms { + key := x.prms[i].GetKey() + switch { default: - f(name, prm.GetValue()) + f(string(key), x.prms[i].GetValue()) case - configEigenTrustAlpha, - configAuditFee, - configStoragePrice, - configContainerFee, - configNamedContainerFee, - configEigenTrustNumberOfIterations, - configEpochDuration, - configIRCandidateFee, - configMaxObjSize, - configWithdrawalFee, - configHomomorphicHashingDisabled, - configMaintenanceModeAllowed: + bytes.Equal(key, configEigenTrustAlpha), + bytes.Equal(key, configAuditFee), + bytes.Equal(key, configStoragePrice), + bytes.Equal(key, configContainerFee), + bytes.Equal(key, configNamedContainerFee), + bytes.Equal(key, configEigenTrustNumberOfIterations), + bytes.Equal(key, configEpochDuration), + bytes.Equal(key, configIRCandidateFee), + bytes.Equal(key, configMaxObjSize), + bytes.Equal(key, configWithdrawalFee), + bytes.Equal(key, configHomomorphicHashingDisabled), + bytes.Equal(key, configMaintenanceModeAllowed): } - - return false - }) + } } -func (x *NetworkInfo) setConfigUint64(name string, num uint64) { +func (x *NetworkInfo) setConfigUint64(name []byte, num uint64) { + if num == 0 { + x.resetConfig(name) + return + } + val := make([]byte, 8) binary.LittleEndian.PutUint64(val, num) x.setConfig(name, val) } -func (x *NetworkInfo) setConfigBool(name string, val bool) { - v := stackitem.NewBool(val) - x.setConfig(name, v.Bytes()) +func (x *NetworkInfo) setConfigBool(name []byte, val bool) { + if !val { + x.resetConfig(name) + return + } + x.setConfig(name, []byte{1}) } // decodeConfigValueUint64 parses val as little-endian uint64. // val must be less than 8 bytes in size. 
func decodeConfigValueUint64(val []byte) (uint64, error) { if ln := len(val); ln > 8 { - return 0, fmt.Errorf("invalid uint64 parameter length %d", ln) + return 0, fmt.Errorf("invalid numeric parameter length %d", ln) } res := uint64(0) @@ -317,13 +305,13 @@ func decodeConfigValueBool(val []byte) (bool, error) { res, err := arr.TryBool() if err != nil { - return false, fmt.Errorf("invalid bool parameter contract format %s", err) + return false, fmt.Errorf("invalid bool parameter: %w", err) } return res, nil } -func (x NetworkInfo) configUint64(name string) uint64 { +func (x NetworkInfo) configUint64(name []byte) uint64 { val := x.configValue(name) if val == nil { return 0 @@ -339,7 +327,7 @@ func (x NetworkInfo) configUint64(name string) uint64 { return res } -func (x NetworkInfo) configBool(name string) bool { +func (x NetworkInfo) configBool(name []byte) bool { val := x.configValue(name) if val == nil { return false @@ -355,45 +343,45 @@ func (x NetworkInfo) configBool(name string) bool { return res } -const configAuditFee = "AuditFee" +var configAuditFee = []byte("AuditFee") // SetAuditFee sets the configuration value of the audit fee for the Inner Ring. // -// See also AuditFee. +// See also [NetworkInfo.AuditFee]. func (x *NetworkInfo) SetAuditFee(fee uint64) { x.setConfigUint64(configAuditFee, fee) } -// AuditFee returns audit fee set using SetAuditFee. +// AuditFee returns audit fee set using [NetworkInfo.SetAuditFee]. // // Zero NetworkInfo has zero audit fee. func (x NetworkInfo) AuditFee() uint64 { return x.configUint64(configAuditFee) } -const configStoragePrice = "BasicIncomeRate" +var configStoragePrice = []byte("BasicIncomeRate") // SetStoragePrice sets the price per gigabyte of data storage that data owners // pay to storage nodes. // -// See also StoragePrice. +// See also [NetworkInfo.StoragePrice]. 
func (x *NetworkInfo) SetStoragePrice(price uint64) { x.setConfigUint64(configStoragePrice, price) } -// StoragePrice returns storage price set using SetStoragePrice. +// StoragePrice returns storage price set using [NetworkInfo.SetStoragePrice]. // // Zero NetworkInfo has zero storage price. func (x NetworkInfo) StoragePrice() uint64 { return x.configUint64(configStoragePrice) } -const configContainerFee = "ContainerFee" +var configContainerFee = []byte("ContainerFee") // SetContainerFee sets fee for the container creation that creator pays to // each Alphabet node. // -// See also ContainerFee. +// See also [NetworkInfo.ContainerFee]. func (x *NetworkInfo) SetContainerFee(fee uint64) { x.setConfigUint64(configContainerFee, fee) } @@ -405,29 +393,30 @@ func (x NetworkInfo) ContainerFee() uint64 { return x.configUint64(configContainerFee) } -const configNamedContainerFee = "ContainerAliasFee" +var configNamedContainerFee = []byte("ContainerAliasFee") // SetNamedContainerFee sets fee for creation of the named container creation // that creator pays to each Alphabet node. // -// See also NamedContainerFee. +// See also [NetworkInfo.NamedContainerFee]. func (x *NetworkInfo) SetNamedContainerFee(fee uint64) { x.setConfigUint64(configNamedContainerFee, fee) } -// NamedContainerFee returns container fee set using SetNamedContainerFee. +// NamedContainerFee returns container fee set using +// [NetworkInfo.SetNamedContainerFee]. // // Zero NetworkInfo has zero container fee. func (x NetworkInfo) NamedContainerFee() uint64 { return x.configUint64(configNamedContainerFee) } -const configEigenTrustAlpha = "EigenTrustAlpha" +var configEigenTrustAlpha = []byte("EigenTrustAlpha") // SetEigenTrustAlpha sets alpha parameter for EigenTrust algorithm used in // reputation system of the storage nodes. Value MUST be in range [0, 1]. // -// See also EigenTrustAlpha. +// See also [NetworkInfo.EigenTrustAlpha]. 
func (x *NetworkInfo) SetEigenTrustAlpha(alpha float64) { if alpha < 0 || alpha > 1 { panic(fmt.Sprintf("EigenTrust alpha parameter MUST be in range [0, 1], got %.2f", alpha)) @@ -436,7 +425,8 @@ func (x *NetworkInfo) SetEigenTrustAlpha(alpha float64) { x.setConfigUint64(configEigenTrustAlpha, math.Float64bits(alpha)) } -// EigenTrustAlpha returns EigenTrust parameter set using SetEigenTrustAlpha. +// EigenTrustAlpha returns EigenTrust parameter set using +// [NetworkInfo.SetEigenTrustAlpha]. // // Zero NetworkInfo has zero alpha parameter. func (x NetworkInfo) EigenTrustAlpha() float64 { @@ -448,124 +438,133 @@ func (x NetworkInfo) EigenTrustAlpha() float64 { return alpha } -const configEigenTrustNumberOfIterations = "EigenTrustIterations" +var configEigenTrustNumberOfIterations = []byte("EigenTrustIterations") // SetNumberOfEigenTrustIterations sets number of iterations of the EigenTrust // algorithm to perform. The algorithm is used by the storage nodes for // calculating the reputation values. // -// See also NumberOfEigenTrustIterations. +// See also [NetworkInfo.NumberOfEigenTrustIterations]. func (x *NetworkInfo) SetNumberOfEigenTrustIterations(num uint64) { x.setConfigUint64(configEigenTrustNumberOfIterations, num) } // NumberOfEigenTrustIterations returns number of EigenTrust iterations set -// using SetNumberOfEigenTrustIterations. +// using [NetworkInfo.SetNumberOfEigenTrustIterations]. // // Zero NetworkInfo has zero iteration number. func (x NetworkInfo) NumberOfEigenTrustIterations() uint64 { return x.configUint64(configEigenTrustNumberOfIterations) } -const configEpochDuration = "EpochDuration" +var configEpochDuration = []byte("EpochDuration") // SetEpochDuration sets NeoFS epoch duration measured in number of blocks of // the NeoFS Sidechain. // -// See also EpochDuration. +// See also [NetworkInfo.EpochDuration]. 
func (x *NetworkInfo) SetEpochDuration(blocks uint64) { x.setConfigUint64(configEpochDuration, blocks) } -// EpochDuration returns epoch duration set using SetEpochDuration. +// EpochDuration returns epoch duration set using +// [NetworkInfo.SetEpochDuration]. // // Zero NetworkInfo has zero iteration number. func (x NetworkInfo) EpochDuration() uint64 { return x.configUint64(configEpochDuration) } -const configIRCandidateFee = "InnerRingCandidateFee" +var configIRCandidateFee = []byte("InnerRingCandidateFee") // SetIRCandidateFee sets fee for Inner Ring entrance paid by a new member. // -// See also IRCandidateFee. +// See also [NetworkInfo.IRCandidateFee]. func (x *NetworkInfo) SetIRCandidateFee(fee uint64) { x.setConfigUint64(configIRCandidateFee, fee) } -// IRCandidateFee returns IR entrance fee set using SetIRCandidateFee. +// IRCandidateFee returns IR entrance fee set using +// [NetworkInfo.SetIRCandidateFee]. // // Zero NetworkInfo has zero fee. func (x NetworkInfo) IRCandidateFee() uint64 { return x.configUint64(configIRCandidateFee) } -const configMaxObjSize = "MaxObjectSize" +var configMaxObjSize = []byte("MaxObjectSize") // SetMaxObjectSize sets maximum size of the object stored locally on the // storage nodes (physical objects). Binary representation of any physically // stored object MUST NOT overflow the limit. // -// See also MaxObjectSize. +// See also [NetworkInfo.MaxObjectSize]. func (x *NetworkInfo) SetMaxObjectSize(sz uint64) { x.setConfigUint64(configMaxObjSize, sz) } -// MaxObjectSize returns maximum object size set using SetMaxObjectSize. +// MaxObjectSize returns maximum object size set using +// [NetworkInfo.SetMaxObjectSize]. // // Zero NetworkInfo has zero maximum size. 
func (x NetworkInfo) MaxObjectSize() uint64 { return x.configUint64(configMaxObjSize) } -const configWithdrawalFee = "WithdrawFee" +var configWithdrawalFee = []byte("WithdrawFee") // SetWithdrawalFee sets fee for withdrawals from the NeoFS accounts that // account owners pay to each Alphabet node. // -// See also WithdrawalFee. -func (x *NetworkInfo) SetWithdrawalFee(sz uint64) { - x.setConfigUint64(configWithdrawalFee, sz) +// See also [NetworkInfo.WithdrawalFee]. +func (x *NetworkInfo) SetWithdrawalFee(fee uint64) { + x.setConfigUint64(configWithdrawalFee, fee) } -// WithdrawalFee returns withdrawal fee set using SetWithdrawalFee. +// WithdrawalFee returns withdrawal fee set using +// [NetworkInfo.SetWithdrawalFee]. // // Zero NetworkInfo has zero fee. func (x NetworkInfo) WithdrawalFee() uint64 { return x.configUint64(configWithdrawalFee) } -const configHomomorphicHashingDisabled = "HomomorphicHashingDisabled" +var configHomomorphicHashingDisabled = []byte("HomomorphicHashingDisabled") -// DisableHomomorphicHashing sets flag requiring to disable homomorphic -// hashing of the containers in the network. +// SetHomomorphicHashingDisabled sets flag indicating whether homomorphic +// hashing of the containers' objects in the network is disabled. // -// See also HomomorphicHashingDisabled. -func (x *NetworkInfo) DisableHomomorphicHashing() { - x.setConfigBool(configHomomorphicHashingDisabled, true) +// See also [NetworkInfo.HomomorphicHashingDisabled]. +func (x *NetworkInfo) SetHomomorphicHashingDisabled(v bool) { + x.setConfigBool(configHomomorphicHashingDisabled, v) } -// HomomorphicHashingDisabled returns the state of the homomorphic -// hashing network setting. +// HomomorphicHashingDisabled returns flag indicating whether homomorphic +// hashing of the containers' objects in the network is disabled. // // Zero NetworkInfo has enabled homomorphic hashing. +// +// See also [NetworkInfo.SetHomomorphicHashingDisabled]. 
func (x NetworkInfo) HomomorphicHashingDisabled() bool { return x.configBool(configHomomorphicHashingDisabled) } -const configMaintenanceModeAllowed = "MaintenanceModeAllowed" +var configMaintenanceModeAllowed = []byte("MaintenanceModeAllowed") -// AllowMaintenanceMode sets the flag allowing nodes to go into maintenance mode. +// SetMaintenanceModeAllowed sets flag indicating whether storage nodes are +// allowed to go into the maintenance mode. // -// See also MaintenanceModeAllowed. -func (x *NetworkInfo) AllowMaintenanceMode() { - x.setConfigBool(configMaintenanceModeAllowed, true) +// See also [NetworkInfo.MaintenanceModeAllowed]. +func (x *NetworkInfo) SetMaintenanceModeAllowed(v bool) { + x.setConfigBool(configMaintenanceModeAllowed, v) } -// MaintenanceModeAllowed returns true iff network config allows -// maintenance mode for storage nodes. +// MaintenanceModeAllowed returns flag indicating whether storage nodes are +// allowed to go into the maintenance mode. // // Zero NetworkInfo has disallows maintenance mode. +// +// See also [NetworkInfo.SetMaintenanceModeAllowed]. 
func (x NetworkInfo) MaintenanceModeAllowed() bool { return x.configBool(configMaintenanceModeAllowed) } diff --git a/netmap/network_info_test.go b/netmap/network_info_test.go index 87db6fa2d..bd03500af 100644 --- a/netmap/network_info_test.go +++ b/netmap/network_info_test.go @@ -3,254 +3,397 @@ package netmap_test import ( "encoding/binary" "math" + "math/rand" + "reflect" "testing" - netmapv2 "github.com/nspcc-dev/neofs-api-go/v2/netmap" + apinetmap "github.com/nspcc-dev/neofs-sdk-go/api/netmap" "github.com/nspcc-dev/neofs-sdk-go/netmap" netmaptest "github.com/nspcc-dev/neofs-sdk-go/netmap/test" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" ) -func TestNetworkInfo_CurrentEpoch(t *testing.T) { - var x netmap.NetworkInfo +func TestNetworkInfo_ReadFromV2(t *testing.T) { + t.Run("invalid fields", func(t *testing.T) { + t.Run("network parameter", func(t *testing.T) { + testCases := []struct { + name string + err string + prm apinetmap.NetworkConfig_Parameter + }{ + {name: "nil key", err: "invalid network parameter #1: missing name", prm: apinetmap.NetworkConfig_Parameter{ + Key: nil, Value: []byte("any"), + }}, + {name: "empty key", err: "invalid network parameter #1: missing name", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte{}, Value: []byte("any"), + }}, + {name: "nil value", err: "invalid network parameter #1: missing value", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("any"), Value: nil, + }}, + {name: "repeated keys", err: "multiple network parameters with name=any_key", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("any_key"), Value: []byte("any"), + }}, + {name: "audit fee format", err: "invalid network parameter #1 (AuditFee): invalid numeric parameter length 13", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("AuditFee"), Value: []byte("Hello, world!"), + }}, + {name: "storage price format", err: "invalid network parameter #1 (BasicIncomeRate): invalid numeric parameter length 13", prm: 
apinetmap.NetworkConfig_Parameter{ + Key: []byte("BasicIncomeRate"), Value: []byte("Hello, world!"), + }}, + {name: "container fee format", err: "invalid network parameter #1 (ContainerFee): invalid numeric parameter length 13", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("ContainerFee"), Value: []byte("Hello, world!"), + }}, + {name: "named container fee format", err: "invalid network parameter #1 (ContainerAliasFee): invalid numeric parameter length 13", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("ContainerAliasFee"), Value: []byte("Hello, world!"), + }}, + {name: "num of EigenTrust iterations format", err: "invalid network parameter #1 (EigenTrustIterations): invalid numeric parameter length 13", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("EigenTrustIterations"), Value: []byte("Hello, world!"), + }}, + {name: "epoch duration format", err: "invalid network parameter #1 (EpochDuration): invalid numeric parameter length 13", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("EpochDuration"), Value: []byte("Hello, world!"), + }}, + {name: "IR candidate fee format", err: "invalid network parameter #1 (InnerRingCandidateFee): invalid numeric parameter length 13", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("InnerRingCandidateFee"), Value: []byte("Hello, world!"), + }}, + {name: "max object size format", err: "invalid network parameter #1 (MaxObjectSize): invalid numeric parameter length 13", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("MaxObjectSize"), Value: []byte("Hello, world!"), + }}, + {name: "withdrawal fee format", err: "invalid network parameter #1 (WithdrawFee): invalid numeric parameter length 13", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("WithdrawFee"), Value: []byte("Hello, world!"), + }}, + {name: "EigenTrust alpha format", err: "invalid network parameter #1 (EigenTrustAlpha): invalid numeric parameter length 13", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("EigenTrustAlpha"), 
Value: []byte("Hello, world!"), + }}, + {name: "negative EigenTrust alpha", err: "invalid network parameter #1 (EigenTrustAlpha): EigenTrust alpha value -3.14 is out of range [0, 1]", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("EigenTrustAlpha"), Value: []byte{31, 133, 235, 81, 184, 30, 9, 192}, + }}, + {name: "negative EigenTrust alpha", err: "invalid network parameter #1 (EigenTrustAlpha): EigenTrust alpha value 1.10 is out of range [0, 1]", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("EigenTrustAlpha"), Value: []byte{154, 153, 153, 153, 153, 153, 241, 63}, + }}, + {name: "disable homomorphic hashing format", err: "invalid network parameter #1 (HomomorphicHashingDisabled): invalid bool parameter", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("HomomorphicHashingDisabled"), Value: make([]byte, 32+1), // max 32 + }}, + {name: "allow maintenance mode format", err: "invalid network parameter #1 (MaintenanceModeAllowed): invalid bool parameter", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("MaintenanceModeAllowed"), Value: make([]byte, 32+1), // max 32 + }}, + } + + for i := range testCases { + n := netmaptest.NetworkInfo() + var m apinetmap.NetworkInfo - require.Zero(t, x.CurrentEpoch()) + n.WriteToV2(&m) + m.NetworkConfig.Parameters = []*apinetmap.NetworkConfig_Parameter{ + {Key: []byte("any_key"), Value: []byte("any_val")}, + &testCases[i].prm, + } - const e = 13 + require.ErrorContains(t, n.ReadFromV2(&m), testCases[i].err) + } + }) + }) +} - x.SetCurrentEpoch(e) +func TestNetworkInfo_Unmarshal(t *testing.T) { + t.Run("invalid binary", func(t *testing.T) { + var n netmap.NetworkInfo + msg := []byte("definitely_not_protobuf") + err := n.Unmarshal(msg) + require.ErrorContains(t, err, "decode protobuf") + }) + t.Run("invalid fields", func(t *testing.T) { + t.Run("network parameter", func(t *testing.T) { + testCases := []struct { + name string + err string + prm apinetmap.NetworkConfig_Parameter + }{ + {name: "nil key", err: 
"invalid network parameter #1: missing name", prm: apinetmap.NetworkConfig_Parameter{ + Key: nil, Value: []byte("any"), + }}, + {name: "empty key", err: "invalid network parameter #1: missing name", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte{}, Value: []byte("any"), + }}, + {name: "nil value", err: "invalid network parameter #1: missing value", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("any"), Value: nil, + }}, + {name: "repeated keys", err: "multiple network parameters with name=any_key", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("any_key"), Value: []byte("any"), + }}, + {name: "audit fee format", err: "invalid network parameter #1 (AuditFee): invalid numeric parameter length 13", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("AuditFee"), Value: []byte("Hello, world!"), + }}, + {name: "storage price format", err: "invalid network parameter #1 (BasicIncomeRate): invalid numeric parameter length 13", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("BasicIncomeRate"), Value: []byte("Hello, world!"), + }}, + {name: "container fee format", err: "invalid network parameter #1 (ContainerFee): invalid numeric parameter length 13", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("ContainerFee"), Value: []byte("Hello, world!"), + }}, + {name: "named container fee format", err: "invalid network parameter #1 (ContainerAliasFee): invalid numeric parameter length 13", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("ContainerAliasFee"), Value: []byte("Hello, world!"), + }}, + {name: "num of EigenTrust iterations format", err: "invalid network parameter #1 (EigenTrustIterations): invalid numeric parameter length 13", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("EigenTrustIterations"), Value: []byte("Hello, world!"), + }}, + {name: "epoch duration format", err: "invalid network parameter #1 (EpochDuration): invalid numeric parameter length 13", prm: apinetmap.NetworkConfig_Parameter{ + Key: 
[]byte("EpochDuration"), Value: []byte("Hello, world!"), + }}, + {name: "IR candidate fee format", err: "invalid network parameter #1 (InnerRingCandidateFee): invalid numeric parameter length 13", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("InnerRingCandidateFee"), Value: []byte("Hello, world!"), + }}, + {name: "max object size format", err: "invalid network parameter #1 (MaxObjectSize): invalid numeric parameter length 13", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("MaxObjectSize"), Value: []byte("Hello, world!"), + }}, + {name: "withdrawal fee format", err: "invalid network parameter #1 (WithdrawFee): invalid numeric parameter length 13", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("WithdrawFee"), Value: []byte("Hello, world!"), + }}, + {name: "EigenTrust alpha format", err: "invalid network parameter #1 (EigenTrustAlpha): invalid numeric parameter length 13", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("EigenTrustAlpha"), Value: []byte("Hello, world!"), + }}, + {name: "negative EigenTrust alpha", err: "invalid network parameter #1 (EigenTrustAlpha): EigenTrust alpha value -3.14 is out of range [0, 1]", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("EigenTrustAlpha"), Value: []byte{31, 133, 235, 81, 184, 30, 9, 192}, + }}, + {name: "negative EigenTrust alpha", err: "invalid network parameter #1 (EigenTrustAlpha): EigenTrust alpha value 1.10 is out of range [0, 1]", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("EigenTrustAlpha"), Value: []byte{154, 153, 153, 153, 153, 153, 241, 63}, + }}, + {name: "disable homomorphic hashing format", err: "invalid network parameter #1 (HomomorphicHashingDisabled): invalid bool parameter", prm: apinetmap.NetworkConfig_Parameter{ + Key: []byte("HomomorphicHashingDisabled"), Value: make([]byte, 32+1), // max 32 + }}, + {name: "allow maintenance mode format", err: "invalid network parameter #1 (MaintenanceModeAllowed): invalid bool parameter", prm: 
apinetmap.NetworkConfig_Parameter{ + Key: []byte("MaintenanceModeAllowed"), Value: make([]byte, 32+1), // max 32 + }}, + } - require.EqualValues(t, e, x.CurrentEpoch()) + for i := range testCases { + n := netmaptest.NetworkInfo() + var m apinetmap.NetworkInfo - var m netmapv2.NetworkInfo - x.WriteToV2(&m) + n.WriteToV2(&m) + m.NetworkConfig.Parameters = []*apinetmap.NetworkConfig_Parameter{ + {Key: []byte("any_key"), Value: []byte("any_val")}, + &testCases[i].prm, + } - require.EqualValues(t, e, m.GetCurrentEpoch()) + b, err := proto.Marshal(&m) + require.NoError(t, err) + require.ErrorContains(t, n.Unmarshal(b), testCases[i].err) + } + }) + }) } -func TestNetworkInfo_MagicNumber(t *testing.T) { - var x netmap.NetworkInfo +func testNetworkInfoField[Type uint64 | int64](t *testing.T, get func(netmap.NetworkInfo) Type, set func(*netmap.NetworkInfo, Type), + getAPI func(info *apinetmap.NetworkInfo) Type) { + var n netmap.NetworkInfo + + require.Zero(t, get(n)) + + const val = 13 + set(&n, val) + require.EqualValues(t, val, get(n)) - require.Zero(t, x.MagicNumber()) + const valOther = 42 + set(&n, valOther) + require.EqualValues(t, valOther, get(n)) - const magic = 321 + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst netmap.NetworkInfo - x.SetMagicNumber(magic) + set(&dst, val) - require.EqualValues(t, magic, x.MagicNumber()) + require.NoError(t, dst.Unmarshal(src.Marshal())) + require.Zero(t, get(dst)) - var m netmapv2.NetworkInfo - x.WriteToV2(&m) + set(&src, val) + + require.NoError(t, dst.Unmarshal(src.Marshal())) + require.EqualValues(t, val, get(dst)) + }) + t.Run("api", func(t *testing.T) { + var src, dst netmap.NetworkInfo + var msg apinetmap.NetworkInfo + + // set required data just to satisfy decoder + src.SetRawNetworkParameter("any", []byte("any")) + + set(&dst, val) + + src.WriteToV2(&msg) + require.Zero(t, getAPI(&msg)) + require.NoError(t, dst.ReadFromV2(&msg)) + require.Zero(t, get(dst)) + + set(&src, val) + 
+ src.WriteToV2(&msg) + require.EqualValues(t, val, getAPI(&msg)) + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + require.EqualValues(t, val, get(dst)) + }) + }) +} + +func TestNetworkInfo_CurrentEpoch(t *testing.T) { + testNetworkInfoField(t, netmap.NetworkInfo.CurrentEpoch, (*netmap.NetworkInfo).SetCurrentEpoch, (*apinetmap.NetworkInfo).GetCurrentEpoch) +} - require.EqualValues(t, magic, m.GetMagicNumber()) +func TestNetworkInfo_MagicNumber(t *testing.T) { + testNetworkInfoField(t, netmap.NetworkInfo.MagicNumber, (*netmap.NetworkInfo).SetMagicNumber, (*apinetmap.NetworkInfo).GetMagicNumber) } func TestNetworkInfo_MsPerBlock(t *testing.T) { - var x netmap.NetworkInfo + testNetworkInfoField(t, netmap.NetworkInfo.MsPerBlock, (*netmap.NetworkInfo).SetMsPerBlock, (*apinetmap.NetworkInfo).GetMsPerBlock) +} - require.Zero(t, x.MsPerBlock()) +func testNetworkConfig[Type uint64 | float64 | bool](t *testing.T, get func(netmap.NetworkInfo) Type, set func(*netmap.NetworkInfo, Type), apiPrm string, + rand func() (_ Type, api []byte)) { + var n netmap.NetworkInfo - const ms = 789 + require.Zero(t, get(n)) - x.SetMsPerBlock(ms) + val, apiVal := rand() + set(&n, val) + require.EqualValues(t, val, get(n)) - require.EqualValues(t, ms, x.MsPerBlock()) + valOther, _ := rand() + set(&n, valOther) + require.EqualValues(t, valOther, get(n)) - var m netmapv2.NetworkInfo - x.WriteToV2(&m) + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst netmap.NetworkInfo - require.EqualValues(t, ms, m.GetMsPerBlock()) -} + set(&dst, val) + + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.Zero(t, get(dst)) + + set(&src, val) + + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.EqualValues(t, val, get(dst)) + }) + t.Run("api", func(t *testing.T) { + var src, dst netmap.NetworkInfo + var msg apinetmap.NetworkInfo + + set(&dst, val) + + src.WriteToV2(&msg) + require.Zero(t, 
msg.GetNetworkConfig().GetParameters()) + msg.NetworkConfig = &apinetmap.NetworkConfig{ + Parameters: []*apinetmap.NetworkConfig_Parameter{ + {Key: []byte("unique_parameter_unlikely_to_be_set"), Value: []byte("any")}, + }, + } + require.NoError(t, dst.ReadFromV2(&msg)) + require.Zero(t, get(dst)) + + set(&src, val) -func testConfigValue(t *testing.T, - getter func(x netmap.NetworkInfo) any, - setter func(x *netmap.NetworkInfo, val any), - val1, val2 any, - v2Key string, v2Val func(val any) []byte, -) { - var x netmap.NetworkInfo - - require.Zero(t, getter(x)) - - checkVal := func(exp any) { - require.EqualValues(t, exp, getter(x)) - - var m netmapv2.NetworkInfo - x.WriteToV2(&m) - - require.EqualValues(t, 1, m.GetNetworkConfig().NumberOfParameters()) - found := false - m.GetNetworkConfig().IterateParameters(func(prm *netmapv2.NetworkParameter) bool { - require.False(t, found) - require.Equal(t, []byte(v2Key), prm.GetKey()) - require.Equal(t, v2Val(exp), prm.GetValue()) - found = true - return false + src.WriteToV2(&msg) + + zero := reflect.Zero(reflect.TypeOf(val)).Interface() + if val == zero { + require.Zero(t, msg.GetNetworkConfig().GetParameters()) + } else { + require.Equal(t, []*apinetmap.NetworkConfig_Parameter{ + {Key: []byte(apiPrm), Value: apiVal}, + }, msg.NetworkConfig.Parameters) + } + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + require.EqualValues(t, val, get(dst)) + + if val != zero { + set(&src, zero.(Type)) + src.WriteToV2(&msg) + require.Empty(t, msg.GetNetworkConfig().GetParameters()) + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + require.Zero(t, get(dst)) + } }) - require.True(t, found) - } + }) +} + +func testNetworkConfigUint(t *testing.T, get func(netmap.NetworkInfo) uint64, set func(*netmap.NetworkInfo, uint64), apiPrm string) { + testNetworkConfig(t, get, set, apiPrm, func() (uint64, []byte) { + n := rand.Uint64() + b := make([]byte, 8) + binary.LittleEndian.PutUint64(b, n) + return n, b + }) +} - setter(&x, val1) - 
checkVal(val1) +func testNetworkConfigFloat(t *testing.T, get func(netmap.NetworkInfo) float64, set func(*netmap.NetworkInfo, float64), apiPrm string) { + testNetworkConfig(t, get, set, apiPrm, func() (float64, []byte) { + n := rand.Float64() + b := make([]byte, 8) + binary.LittleEndian.PutUint64(b, math.Float64bits(n)) + return n, b + }) +} - setter(&x, val2) - checkVal(val2) +func testNetworkConfigBool(t *testing.T, get func(netmap.NetworkInfo) bool, set func(*netmap.NetworkInfo, bool), apiPrm string) { + testNetworkConfig(t, get, set, apiPrm, func() (bool, []byte) { + if rand.Int()%2 == 0 { + return true, []byte{1} + } + return false, []byte{0} + }) } func TestNetworkInfo_AuditFee(t *testing.T) { - testConfigValue(t, - func(x netmap.NetworkInfo) any { return x.AuditFee() }, - func(info *netmap.NetworkInfo, val any) { info.SetAuditFee(val.(uint64)) }, - uint64(1), uint64(2), - "AuditFee", func(val any) []byte { - data := make([]byte, 8) - binary.LittleEndian.PutUint64(data, val.(uint64)) - return data - }, - ) + testNetworkConfigUint(t, netmap.NetworkInfo.AuditFee, (*netmap.NetworkInfo).SetAuditFee, "AuditFee") } func TestNetworkInfo_StoragePrice(t *testing.T) { - testConfigValue(t, - func(x netmap.NetworkInfo) any { return x.StoragePrice() }, - func(info *netmap.NetworkInfo, val any) { info.SetStoragePrice(val.(uint64)) }, - uint64(1), uint64(2), - "BasicIncomeRate", func(val any) []byte { - data := make([]byte, 8) - binary.LittleEndian.PutUint64(data, val.(uint64)) - return data - }, - ) + testNetworkConfigUint(t, netmap.NetworkInfo.StoragePrice, (*netmap.NetworkInfo).SetStoragePrice, "BasicIncomeRate") } func TestNetworkInfo_ContainerFee(t *testing.T) { - testConfigValue(t, - func(x netmap.NetworkInfo) any { return x.ContainerFee() }, - func(info *netmap.NetworkInfo, val any) { info.SetContainerFee(val.(uint64)) }, - uint64(1), uint64(2), - "ContainerFee", func(val any) []byte { - data := make([]byte, 8) - binary.LittleEndian.PutUint64(data, val.(uint64)) - 
return data - }, - ) + testNetworkConfigUint(t, netmap.NetworkInfo.ContainerFee, (*netmap.NetworkInfo).SetContainerFee, "ContainerFee") } func TestNetworkInfo_NamedContainerFee(t *testing.T) { - testConfigValue(t, - func(x netmap.NetworkInfo) any { return x.NamedContainerFee() }, - func(info *netmap.NetworkInfo, val any) { info.SetNamedContainerFee(val.(uint64)) }, - uint64(1), uint64(2), - "ContainerAliasFee", func(val any) []byte { - data := make([]byte, 8) - binary.LittleEndian.PutUint64(data, val.(uint64)) - return data - }, - ) + testNetworkConfigUint(t, netmap.NetworkInfo.NamedContainerFee, (*netmap.NetworkInfo).SetNamedContainerFee, "ContainerAliasFee") } func TestNetworkInfo_EigenTrustAlpha(t *testing.T) { - testConfigValue(t, - func(x netmap.NetworkInfo) any { return x.EigenTrustAlpha() }, - func(info *netmap.NetworkInfo, val any) { info.SetEigenTrustAlpha(val.(float64)) }, - 0.1, 0.2, - "EigenTrustAlpha", func(val any) []byte { - data := make([]byte, 8) - binary.LittleEndian.PutUint64(data, math.Float64bits(val.(float64))) - return data - }, - ) + testNetworkConfigFloat(t, netmap.NetworkInfo.EigenTrustAlpha, (*netmap.NetworkInfo).SetEigenTrustAlpha, "EigenTrustAlpha") } func TestNetworkInfo_NumberOfEigenTrustIterations(t *testing.T) { - testConfigValue(t, - func(x netmap.NetworkInfo) any { return x.NumberOfEigenTrustIterations() }, - func(info *netmap.NetworkInfo, val any) { info.SetNumberOfEigenTrustIterations(val.(uint64)) }, - uint64(1), uint64(2), - "EigenTrustIterations", func(val any) []byte { - data := make([]byte, 8) - binary.LittleEndian.PutUint64(data, val.(uint64)) - return data - }, - ) + testNetworkConfigUint(t, netmap.NetworkInfo.NumberOfEigenTrustIterations, (*netmap.NetworkInfo).SetNumberOfEigenTrustIterations, "EigenTrustIterations") +} + +func TestNetworkInfo_EpochDuration(t *testing.T) { + testNetworkConfigUint(t, netmap.NetworkInfo.EpochDuration, (*netmap.NetworkInfo).SetEpochDuration, "EpochDuration") } func 
TestNetworkInfo_IRCandidateFee(t *testing.T) { - testConfigValue(t, - func(x netmap.NetworkInfo) any { return x.IRCandidateFee() }, - func(info *netmap.NetworkInfo, val any) { info.SetIRCandidateFee(val.(uint64)) }, - uint64(1), uint64(2), - "InnerRingCandidateFee", func(val any) []byte { - data := make([]byte, 8) - binary.LittleEndian.PutUint64(data, val.(uint64)) - return data - }, - ) + testNetworkConfigUint(t, netmap.NetworkInfo.IRCandidateFee, (*netmap.NetworkInfo).SetIRCandidateFee, "InnerRingCandidateFee") } func TestNetworkInfo_MaxObjectSize(t *testing.T) { - testConfigValue(t, - func(x netmap.NetworkInfo) any { return x.MaxObjectSize() }, - func(info *netmap.NetworkInfo, val any) { info.SetMaxObjectSize(val.(uint64)) }, - uint64(1), uint64(2), - "MaxObjectSize", func(val any) []byte { - data := make([]byte, 8) - binary.LittleEndian.PutUint64(data, val.(uint64)) - return data - }, - ) + testNetworkConfigUint(t, netmap.NetworkInfo.MaxObjectSize, (*netmap.NetworkInfo).SetMaxObjectSize, "MaxObjectSize") } func TestNetworkInfo_WithdrawalFee(t *testing.T) { - testConfigValue(t, - func(x netmap.NetworkInfo) any { return x.WithdrawalFee() }, - func(info *netmap.NetworkInfo, val any) { info.SetWithdrawalFee(val.(uint64)) }, - uint64(1), uint64(2), - "WithdrawFee", func(val any) []byte { - data := make([]byte, 8) - binary.LittleEndian.PutUint64(data, val.(uint64)) - return data - }, - ) + testNetworkConfigUint(t, netmap.NetworkInfo.WithdrawalFee, (*netmap.NetworkInfo).SetWithdrawalFee, "WithdrawFee") } func TestNetworkInfo_HomomorphicHashingDisabled(t *testing.T) { - testConfigValue(t, - func(x netmap.NetworkInfo) any { return x.HomomorphicHashingDisabled() }, - func(info *netmap.NetworkInfo, val any) { - if val.(bool) { - info.DisableHomomorphicHashing() - } - }, - true, true, // it is impossible to enable hashing - "HomomorphicHashingDisabled", func(val any) []byte { - data := make([]byte, 1) - - if val.(bool) { - data[0] = 1 - } - - return data - }, - ) + 
testNetworkConfigBool(t, netmap.NetworkInfo.HomomorphicHashingDisabled, (*netmap.NetworkInfo).SetHomomorphicHashingDisabled, "HomomorphicHashingDisabled") } func TestNetworkInfo_MaintenanceModeAllowed(t *testing.T) { - testConfigValue(t, - func(x netmap.NetworkInfo) any { return x.MaintenanceModeAllowed() }, - func(info *netmap.NetworkInfo, val any) { - if val.(bool) { - info.AllowMaintenanceMode() - } - }, - true, true, - "MaintenanceModeAllowed", func(val any) []byte { - if val.(bool) { - return []byte{1} - } - return []byte{0} - }, - ) + testNetworkConfigBool(t, netmap.NetworkInfo.MaintenanceModeAllowed, (*netmap.NetworkInfo).SetMaintenanceModeAllowed, "MaintenanceModeAllowed") } func TestNetworkInfo_Marshal(t *testing.T) { diff --git a/netmap/node_info.go b/netmap/node_info.go index 00df52584..4d5c414ad 100644 --- a/netmap/node_info.go +++ b/netmap/node_info.go @@ -8,8 +8,10 @@ import ( "strings" "github.com/nspcc-dev/hrw/v2" - "github.com/nspcc-dev/neofs-api-go/v2/netmap" + "github.com/nspcc-dev/neofs-sdk-go/api/netmap" neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" ) // NodeInfo groups information about NeoFS storage node which is reflected @@ -18,130 +20,156 @@ import ( // about the nodes is available to all network participants to work with the network // map (mainly to comply with container storage policies). // -// NodeInfo is mutually compatible with github.com/nspcc-dev/neofs-api-go/v2/netmap.NodeInfo -// message. See ReadFromV2 / WriteToV2 methods. +// NodeInfo is mutually compatible with [netmap.NodeInfo] message. See +// [NodeInfo.ReadFromV2] / [NodeInfo.WriteToV2] methods. // // Instances can be created using built-in var declaration. 
type NodeInfo struct { - m netmap.NodeInfo + state netmap.NodeInfo_State + pubKey []byte + endpoints []string + attrs []*netmap.NodeInfo_Attribute +} + +func isEmptyNodeInfo(n NodeInfo) bool { + return n.state == 0 && len(n.pubKey) == 0 && len(n.endpoints) == 0 && len(n.attrs) == 0 } // reads NodeInfo from netmap.NodeInfo message. If checkFieldPresence is set, // returns an error on absence of any protocol-required field. Verifies format of any // presented field according to NeoFS API V2 protocol. -func (x *NodeInfo) readFromV2(m netmap.NodeInfo, checkFieldPresence bool) error { +func (x *NodeInfo) readFromV2(m *netmap.NodeInfo, checkFieldPresence bool) error { var err error - binPublicKey := m.GetPublicKey() - if checkFieldPresence && len(binPublicKey) == 0 { + if checkFieldPresence && len(m.PublicKey) == 0 { return errors.New("missing public key") } - if checkFieldPresence && m.NumberOfAddresses() <= 0 { + if checkFieldPresence && len(m.Addresses) == 0 { return errors.New("missing network endpoints") } + for i := range m.Addresses { + if m.Addresses[i] == "" { + return fmt.Errorf("empty network endpoint #%d", i) + } + } + attributes := m.GetAttributes() - mAttr := make(map[string]struct{}, len(attributes)) for i := range attributes { key := attributes[i].GetKey() if key == "" { - return fmt.Errorf("empty key of the attribute #%d", i) - } else if _, ok := mAttr[key]; ok { - return fmt.Errorf("duplicated attbiuted %s", key) + return fmt.Errorf("invalid attribute #%d: missing key", i) + } // also prevents further NPE + for j := 0; j < i; j++ { + if attributes[j].Key == key { + return fmt.Errorf("multiple attributes with key=%s", key) + } } - - switch { - case key == attrCapacity: - _, err = strconv.ParseUint(attributes[i].GetValue(), 10, 64) + if attributes[i].Value == "" { + return fmt.Errorf("invalid attribute #%d (%s): missing value", i, key) + } + switch key { + case attrCapacity: + _, err = strconv.ParseUint(attributes[i].Value, 10, 64) if err != nil { - 
return fmt.Errorf("invalid %s attribute: %w", attrCapacity, err) + return fmt.Errorf("invalid capacity attribute (#%d): invalid integer (%w)", i, err) } - case key == attrPrice: - var err error - _, err = strconv.ParseUint(attributes[i].GetValue(), 10, 64) + case attrPrice: + _, err = strconv.ParseUint(attributes[i].Value, 10, 64) if err != nil { - return fmt.Errorf("invalid %s attribute: %w", attrPrice, err) - } - default: - if attributes[i].GetValue() == "" { - return fmt.Errorf("empty value of the attribute %s", key) + return fmt.Errorf("invalid price attribute (#%d): invalid integer (%w)", i, err) } } } - x.m = m + x.pubKey = m.PublicKey + x.endpoints = m.Addresses + x.state = m.State + x.attrs = attributes return nil } -// ReadFromV2 reads NodeInfo from the netmap.NodeInfo message. Checks if the -// message conforms to NeoFS API V2 protocol. +// ReadFromV2 reads NodeInfo from the [netmap.NodeInfo] message. Returns an +// error if the message is malformed according to the NeoFS API V2 protocol. The +// message must not be nil. +// +// ReadFromV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. // -// See also WriteToV2. -func (x *NodeInfo) ReadFromV2(m netmap.NodeInfo) error { +// See also [NodeInfo.WriteToV2]. +func (x *NodeInfo) ReadFromV2(m *netmap.NodeInfo) error { return x.readFromV2(m, true) } -// WriteToV2 writes NodeInfo to the netmap.NodeInfo message. The message MUST NOT -// be nil. +// WriteToV2 writes NodeInfo to the [netmap.NodeInfo] message of the NeoFS API +// protocol. +// +// WriteToV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. // -// See also ReadFromV2. +// See also [NodeInfo.ReadFromV2]. 
func (x NodeInfo) WriteToV2(m *netmap.NodeInfo) { - *m = x.m + m.Attributes = x.attrs + m.PublicKey = x.pubKey + m.Addresses = x.endpoints + m.State = x.state } -// Marshal encodes NodeInfo into a binary format of the NeoFS API protocol -// (Protocol Buffers with direct field order). +// Marshal encodes NodeInfo into a binary format of the NeoFS API +// protocol (Protocol Buffers V3 with direct field order). // -// See also Unmarshal. +// See also [NodeInfo.Unmarshal]. func (x NodeInfo) Marshal() []byte { var m netmap.NodeInfo x.WriteToV2(&m) - - return m.StableMarshal(nil) + b := make([]byte, m.MarshaledSize()) + m.MarshalStable(b) + return b } -// Unmarshal decodes NeoFS API protocol binary format into the NodeInfo -// (Protocol Buffers with direct field order). Returns an error describing -// a format violation. +// Unmarshal decodes Protocol Buffers V3 binary data into the NodeInfo. Returns +// an error describing a format violation of the specified fields. Unmarshal +// does not check presence of the required fields and, at the same time, checks +// format of presented fields. // -// See also Marshal. +// See also [NodeInfo.Marshal]. func (x *NodeInfo) Unmarshal(data []byte) error { var m netmap.NodeInfo - - err := m.Unmarshal(data) + err := proto.Unmarshal(data, &m) if err != nil { - return err + return fmt.Errorf("decode protobuf: %w", err) } - return x.readFromV2(m, false) + return x.readFromV2(&m, false) } // MarshalJSON encodes NodeInfo into a JSON format of the NeoFS API protocol -// (Protocol Buffers JSON). +// (Protocol Buffers V3 JSON). // -// See also UnmarshalJSON. +// See also [NodeInfo.UnmarshalJSON]. func (x NodeInfo) MarshalJSON() ([]byte, error) { var m netmap.NodeInfo x.WriteToV2(&m) - return m.MarshalJSON() + return protojson.Marshal(&m) } -// UnmarshalJSON decodes NeoFS API protocol JSON format into the NodeInfo -// (Protocol Buffers JSON). Returns an error describing a format violation. 
+// UnmarshalJSON decodes NeoFS API protocol JSON data into the NodeInfo +// (Protocol Buffers V3 JSON). Returns an error describing a format violation. +// UnmarshalJSON does not check presence of the required fields and, at the same +// time, checks format of presented fields. // -// See also MarshalJSON. +// See also [NodeInfo.MarshalJSON]. func (x *NodeInfo) UnmarshalJSON(data []byte) error { var m netmap.NodeInfo - - err := m.UnmarshalJSON(data) + err := protojson.Unmarshal(data, &m) if err != nil { - return err + return fmt.Errorf("decode protojson: %w", err) } - return x.readFromV2(m, false) + return x.readFromV2(&m, false) } // SetPublicKey sets binary-encoded public key bound to the node. The key @@ -153,7 +181,7 @@ func (x *NodeInfo) UnmarshalJSON(data []byte) error { // // See also [NodeInfo.PublicKey]. func (x *NodeInfo) SetPublicKey(key []byte) { - x.m.SetPublicKey(key) + x.pubKey = key } // PublicKey returns value set using [NodeInfo.SetPublicKey]. @@ -167,10 +195,10 @@ func (x *NodeInfo) SetPublicKey(key []byte) { // The value returned shares memory with the structure itself, so changing it can lead to data corruption. // Make a copy if you need to change it. func (x NodeInfo) PublicKey() []byte { - return x.m.GetPublicKey() + return x.pubKey } -// StringifyPublicKey returns HEX representation of PublicKey. +// StringifyPublicKey returns HEX representation of [NodeInfo.PublicKey]. func StringifyPublicKey(node NodeInfo) string { return neofscrypto.StringifyKeyBinary(node.PublicKey()) } @@ -183,37 +211,16 @@ func StringifyPublicKey(node NodeInfo) string { // // Argument MUST NOT be mutated, make a copy first. // -// See also IterateNetworkEndpoints. -func (x *NodeInfo) SetNetworkEndpoints(v ...string) { - x.m.SetAddresses(v...) -} - -// NumberOfNetworkEndpoints returns number of network endpoints announced by the node. -// -// See also SetNetworkEndpoints. 
-func (x NodeInfo) NumberOfNetworkEndpoints() int { - return x.m.NumberOfAddresses() +// See also [NodeInfo.NetworkEndpoints]. +func (x *NodeInfo) SetNetworkEndpoints(v []string) { + x.endpoints = v } -// IterateNetworkEndpoints iterates over network endpoints announced by the -// node and pass them into f. Breaks iteration on f's true return. Handler -// MUST NOT be nil. +// NetworkEndpoints returns the list of the announced node's network endpoints. // -// Zero NodeInfo contains no endpoints which is incorrect according to -// NeoFS system requirements. -// -// See also SetNetworkEndpoints. -func (x NodeInfo) IterateNetworkEndpoints(f func(string) bool) { - x.m.IterateAddresses(f) -} - -// IterateNetworkEndpoints is an extra-sugared function over IterateNetworkEndpoints -method which allows to unconditionally iterate over all node's network endpoints. -func IterateNetworkEndpoints(node NodeInfo, f func(string)) { - node.IterateNetworkEndpoints(func(addr string) bool { - f(addr) - return false - }) +// See also [NodeInfo.SetNetworkEndpoints]. +func (x NodeInfo) NetworkEndpoints() []string { + return x.endpoints } // assert NodeInfo type provides hrw.Hasher required for HRW sorting. @@ -224,7 +231,7 @@ var _ hrw.Hashable = NodeInfo{} // Hash is needed to support weighted HRW therefore sort function sorts nodes // based on their public key. Hash isn't expected to be used directly. func (x NodeInfo) Hash() uint64 { - return hrw.Hash(x.m.GetPublicKey()) + return hrw.Hash(x.PublicKey()) } // less declares "less than" comparison between two NodeInfo instances: @@ -241,11 +248,13 @@ func (x *NodeInfo) setNumericAttribute(key string, num uint64) { // SetPrice sets the storage cost declared by the node. By default, zero // price is announced. +// +// See also [NodeInfo.Price]. func (x *NodeInfo) SetPrice(price uint64) { x.setNumericAttribute(attrPrice, price) } -// Price returns price set using SetPrice. +// Price returns price set using [NodeInfo.SetPrice]. 
// // Zero NodeInfo has zero price. func (x NodeInfo) Price() uint64 { @@ -264,20 +273,16 @@ func (x NodeInfo) Price() uint64 { // SetCapacity sets the storage capacity declared by the node. By default, zero // capacity is announced. +// +// See also [NodeInfo.Capacity]. func (x *NodeInfo) SetCapacity(capacity uint64) { x.setNumericAttribute(attrCapacity, capacity) } -// SetVersion sets node's version. By default, version -// is not announced. -func (x *NodeInfo) SetVersion(version string) { - x.SetAttribute(attrVersion, version) -} - -// capacity returns capacity set using SetCapacity. +// Capacity returns capacity set using [NodeInfo.SetCapacity]. // // Zero NodeInfo has zero capacity. -func (x NodeInfo) capacity() uint64 { +func (x NodeInfo) Capacity() uint64 { val := x.Attribute(attrCapacity) if val == "" { return 0 @@ -291,6 +296,20 @@ func (x NodeInfo) capacity() uint64 { return capacity } +// SetVersion sets node's version. By default, version is not announced. +// +// See also [NodeInfo.Version]. +func (x *NodeInfo) SetVersion(version string) { + x.SetAttribute(attrVersion, version) +} + +// Version returns announced node version set using [NodeInfo.SetVersion]. +// +// Zero NodeInfo has no announced version. +func (x NodeInfo) Version() string { + return x.Attribute(attrVersion) +} + const ( attrUNLOCODE = "UN-LOCODE" attrCountryCode = "CountryCode" @@ -307,12 +326,12 @@ const ( // impossible to unambiguously attribute the node to any location from UN/LOCODE // database. // -// See also LOCODE. +// See also [NodeInfo.LOCODE]. func (x *NodeInfo) SetLOCODE(locode string) { x.SetAttribute(attrUNLOCODE, locode) } -// LOCODE returns node's location code set using SetLOCODE. +// LOCODE returns node's location code set using [NodeInfo.SetLOCODE]. // // Zero NodeInfo has empty location code which is invalid according to // NeoFS API system requirement. @@ -427,12 +446,16 @@ const ( // to connect to this node from outside. // // Panics if addr is an empty list. 
-func (x *NodeInfo) SetExternalAddresses(addr ...string) { +// +// See also [NodeInfo.ExternalAddresses]. +func (x *NodeInfo) SetExternalAddresses(addr []string) { x.SetAttribute(attrExternalAddr, strings.Join(addr, sepExternalAddr)) } // ExternalAddresses returns list of multi-addresses to use // to connect to this node from outside. +// +// See also [NodeInfo.SetExternalAddresses]. func (x NodeInfo) ExternalAddresses() []string { a := x.Attribute(attrExternalAddr) if len(a) == 0 { @@ -444,22 +467,25 @@ func (x NodeInfo) ExternalAddresses() []string { // NumberOfAttributes returns number of attributes announced by the node. // -// See also SetAttribute. +// See also [NodeInfo.SetAttribute]. func (x NodeInfo) NumberOfAttributes() int { - return len(x.m.GetAttributes()) + return len(x.attrs) } // IterateAttributes iterates over all node attributes and passes the into f. // Handler MUST NOT be nil. +// +// See also [NodeInfo.SetAttribute]. func (x NodeInfo) IterateAttributes(f func(key, value string)) { - a := x.m.GetAttributes() - for i := range a { - f(a[i].GetKey(), a[i].GetValue()) + for i := range x.attrs { + f(x.attrs[i].GetKey(), x.attrs[i].GetValue()) } } // SetAttribute sets value of the node attribute value by the given key. // Both key and value MUST NOT be empty. +// +// See also [NodeInfo.NumberOfAttributes], [NodeInfo.IterateAttributes]. 
func (x *NodeInfo) SetAttribute(key, value string) { if key == "" { panic("empty key in SetAttribute") @@ -467,61 +493,59 @@ func (x *NodeInfo) SetAttribute(key, value string) { panic("empty value in SetAttribute") } - a := x.m.GetAttributes() - for i := range a { - if a[i].GetKey() == key { - a[i].SetValue(value) + for i := range x.attrs { + if x.attrs[i].GetKey() == key { + x.attrs[i].Value = value return } } - a = append(a, netmap.Attribute{}) - a[len(a)-1].SetKey(key) - a[len(a)-1].SetValue(value) - - x.m.SetAttributes(a) + x.attrs = append(x.attrs, &netmap.NodeInfo_Attribute{ + Key: key, + Value: value, + }) } -// Attribute returns value of the node attribute set using SetAttribute by the -// given key. Returns empty string if attribute is missing. +// Attribute returns value of the node attribute set using +// [NodeInfo.SetAttribute] by the given key. Returns empty string if attribute +// is missing. func (x NodeInfo) Attribute(key string) string { - a := x.m.GetAttributes() - for i := range a { - if a[i].GetKey() == key { - return a[i].GetValue() + for i := range x.attrs { + if x.attrs[i].GetKey() == key { + return x.attrs[i].GetValue() } } return "" } -// SortAttributes sorts node attributes set using SetAttribute lexicographically. -// The method is only needed to make NodeInfo consistent, e.g. for signing. +// SortAttributes sorts node attributes set using [NodeInfo.SetAttribute] +// lexicographically. The method is only needed to make NodeInfo consistent, +// e.g. for signing. 
func (x *NodeInfo) SortAttributes() { - as := x.m.GetAttributes() - if len(as) == 0 { + if len(x.attrs) == 0 { return } - sort.Slice(as, func(i, j int) bool { - switch strings.Compare(as[i].GetKey(), as[j].GetKey()) { + sort.Slice(x.attrs, func(i, j int) bool { + switch strings.Compare(x.attrs[i].GetKey(), x.attrs[j].GetKey()) { case -1: return true case 1: return false default: - return as[i].GetValue() < as[j].GetValue() + return x.attrs[i].GetValue() < x.attrs[j].GetValue() } }) - - x.m.SetAttributes(as) } // SetOffline sets the state of the node to "offline". When a node updates // information about itself in the network map, this action is interpreted as // an intention to leave the network. +// +// See also [NodeInfo.IsOffline]. func (x *NodeInfo) SetOffline() { - x.m.SetState(netmap.Offline) + x.state = netmap.NodeInfo_OFFLINE } // IsOffline checks if the node is in the "offline" state. @@ -529,18 +553,18 @@ func (x *NodeInfo) SetOffline() { // Zero NodeInfo has undefined state which is not offline (note that it does not // mean online). // -// See also SetOffline. +// See also [NodeInfo.SetOffline]. func (x NodeInfo) IsOffline() bool { - return x.m.GetState() == netmap.Offline + return x.state == netmap.NodeInfo_OFFLINE } // SetOnline sets the state of the node to "online". When a node updates // information about itself in the network map, this // action is interpreted as an intention to enter the network. // -// See also IsOnline. +// See also [NodeInfo.IsOnline]. func (x *NodeInfo) SetOnline() { - x.m.SetState(netmap.Online) + x.state = netmap.NodeInfo_ONLINE } // IsOnline checks if the node is in the "online" state. @@ -548,27 +572,27 @@ func (x *NodeInfo) SetOnline() { // Zero NodeInfo has undefined state which is not online (note that it does not // mean offline). // -// See also SetOnline. +// See also [NodeInfo.SetOnline]. 
func (x NodeInfo) IsOnline() bool { - return x.m.GetState() == netmap.Online + return x.state == netmap.NodeInfo_ONLINE } -// SetMaintenance sets the state of the node to "maintenance". When a node updates -// information about itself in the network map, this -// state declares temporal unavailability for a node. +// SetMaintenance sets the state of the node to "maintenance". When a node +// updates information about itself in the network map, this state declares +// temporal unavailability for a node. // -// See also IsMaintenance. +// See also [NodeInfo.IsMaintenance]. func (x *NodeInfo) SetMaintenance() { - x.m.SetState(netmap.Maintenance) + x.state = netmap.NodeInfo_MAINTENANCE } // IsMaintenance checks if the node is in the "maintenance" state. // // Zero NodeInfo has undefined state. // -// See also SetMaintenance. +// See also [NodeInfo.SetMaintenance]. func (x NodeInfo) IsMaintenance() bool { - return x.m.GetState() == netmap.Maintenance + return x.state == netmap.NodeInfo_MAINTENANCE } const attrVerifiedNodesDomain = "VerifiedNodesDomain" diff --git a/netmap/node_info_test.go b/netmap/node_info_test.go index bedd9d160..3e8023e25 100644 --- a/netmap/node_info_test.go +++ b/netmap/node_info_test.go @@ -1,88 +1,834 @@ -package netmap +package netmap_test import ( + "fmt" + "math/rand" + "strconv" "testing" - "github.com/nspcc-dev/neofs-api-go/v2/netmap" + apinetmap "github.com/nspcc-dev/neofs-sdk-go/api/netmap" + "github.com/nspcc-dev/neofs-sdk-go/netmap" + netmaptest "github.com/nspcc-dev/neofs-sdk-go/netmap/test" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" ) +func TestNodeInfo_ReadFromV2(t *testing.T) { + t.Run("missing fields", func(t *testing.T) { + t.Run("public key", func(t *testing.T) { + n := netmaptest.NodeInfo() + var m apinetmap.NodeInfo + + n.WriteToV2(&m) + m.PublicKey = nil + require.ErrorContains(t, n.ReadFromV2(&m), "missing public key") + m.PublicKey = []byte{} + require.ErrorContains(t, n.ReadFromV2(&m), 
"missing public key") + }) + t.Run("network endpoints", func(t *testing.T) { + n := netmaptest.NodeInfo() + var m apinetmap.NodeInfo + + n.WriteToV2(&m) + m.Addresses = nil + require.ErrorContains(t, n.ReadFromV2(&m), "missing network endpoints") + m.Addresses = []string{} + require.ErrorContains(t, n.ReadFromV2(&m), "missing network endpoints") + }) + }) + t.Run("invalid fields", func(t *testing.T) { + t.Run("network endpoints", func(t *testing.T) { + n := netmaptest.NodeInfo() + var m apinetmap.NodeInfo + + n.WriteToV2(&m) + m.Addresses = []string{"any", "", "any"} + require.ErrorContains(t, n.ReadFromV2(&m), "empty network endpoint #1") + }) + t.Run("attributes", func(t *testing.T) { + t.Run("missing key", func(t *testing.T) { + n := netmaptest.NodeInfo() + var m apinetmap.NodeInfo + + n.WriteToV2(&m) + m.Attributes = []*apinetmap.NodeInfo_Attribute{ + {Key: "key_valid", Value: "any"}, + {Key: "", Value: "any"}, + } + require.ErrorContains(t, n.ReadFromV2(&m), "invalid attribute #1: missing key") + }) + t.Run("repeated keys", func(t *testing.T) { + n := netmaptest.NodeInfo() + var m apinetmap.NodeInfo + + n.WriteToV2(&m) + m.Attributes = []*apinetmap.NodeInfo_Attribute{ + {Key: "k1", Value: "any"}, + {Key: "k2", Value: "1"}, + {Key: "k3", Value: "any"}, + {Key: "k2", Value: "2"}, + } + require.ErrorContains(t, n.ReadFromV2(&m), "multiple attributes with key=k2") + }) + t.Run("missing value", func(t *testing.T) { + n := netmaptest.NodeInfo() + var m apinetmap.NodeInfo + + n.WriteToV2(&m) + m.Attributes = []*apinetmap.NodeInfo_Attribute{ + {Key: "key1", Value: "any"}, + {Key: "key2", Value: ""}, + } + require.ErrorContains(t, n.ReadFromV2(&m), "invalid attribute #1 (key2): missing value") + }) + t.Run("price format", func(t *testing.T) { + n := netmaptest.NodeInfo() + var m apinetmap.NodeInfo + + n.WriteToV2(&m) + m.Attributes = []*apinetmap.NodeInfo_Attribute{ + {Key: "any", Value: "any"}, + {Key: "Price", Value: "not_a_number"}, + } + require.ErrorContains(t, 
n.ReadFromV2(&m), "invalid price attribute (#1): invalid integer") + }) + t.Run("capacity format", func(t *testing.T) { + n := netmaptest.NodeInfo() + var m apinetmap.NodeInfo + + n.WriteToV2(&m) + m.Attributes = []*apinetmap.NodeInfo_Attribute{ + {Key: "any", Value: "any"}, + {Key: "Capacity", Value: "not_a_number"}, + } + require.ErrorContains(t, n.ReadFromV2(&m), "invalid capacity attribute (#1): invalid integer") + }) + }) + }) +} + +func TestNodeInfo_Unmarshal(t *testing.T) { + t.Run("invalid binary", func(t *testing.T) { + var n netmap.NodeInfo + msg := []byte("definitely_not_protobuf") + err := n.Unmarshal(msg) + require.ErrorContains(t, err, "decode protobuf") + }) + t.Run("invalid fields", func(t *testing.T) { + t.Run("network endpoints", func(t *testing.T) { + var n netmap.NodeInfo + var m apinetmap.NodeInfo + m.Addresses = []string{"any", "", "any"} + b, err := proto.Marshal(&m) + require.NoError(t, err) + require.ErrorContains(t, n.Unmarshal(b), "empty network endpoint #1") + }) + t.Run("attributes", func(t *testing.T) { + t.Run("missing key", func(t *testing.T) { + var n netmap.NodeInfo + var m apinetmap.NodeInfo + m.Attributes = []*apinetmap.NodeInfo_Attribute{ + {Key: "key_valid", Value: "any"}, + {Key: "", Value: "any"}, + } + b, err := proto.Marshal(&m) + require.NoError(t, err) + require.ErrorContains(t, n.Unmarshal(b), "invalid attribute #1: missing key") + }) + t.Run("repeated keys", func(t *testing.T) { + var n netmap.NodeInfo + var m apinetmap.NodeInfo + m.Attributes = []*apinetmap.NodeInfo_Attribute{ + {Key: "k1", Value: "any"}, + {Key: "k2", Value: "1"}, + {Key: "k3", Value: "any"}, + {Key: "k2", Value: "2"}, + } + b, err := proto.Marshal(&m) + require.NoError(t, err) + require.ErrorContains(t, n.Unmarshal(b), "multiple attributes with key=k2") + }) + t.Run("missing value", func(t *testing.T) { + var n netmap.NodeInfo + var m apinetmap.NodeInfo + m.Attributes = []*apinetmap.NodeInfo_Attribute{ + {Key: "key1", Value: "any"}, + {Key: "key2", 
Value: ""}, + } + b, err := proto.Marshal(&m) + require.NoError(t, err) + require.ErrorContains(t, n.Unmarshal(b), "invalid attribute #1 (key2): missing value") + }) + t.Run("price", func(t *testing.T) { + var n netmap.NodeInfo + var m apinetmap.NodeInfo + m.Attributes = []*apinetmap.NodeInfo_Attribute{ + {Key: "any", Value: "any"}, + {Key: "Price", Value: "not_a_number"}, + } + b, err := proto.Marshal(&m) + require.NoError(t, err) + require.ErrorContains(t, n.Unmarshal(b), "invalid price attribute (#1): invalid integer") + }) + t.Run("capacity", func(t *testing.T) { + var n netmap.NodeInfo + var m apinetmap.NodeInfo + m.Attributes = []*apinetmap.NodeInfo_Attribute{ + {Key: "any", Value: "any"}, + {Key: "Capacity", Value: "not_a_number"}, + } + b, err := proto.Marshal(&m) + require.NoError(t, err) + require.ErrorContains(t, n.Unmarshal(b), "invalid capacity attribute (#1): invalid integer") + }) + }) + }) +} + +func TestNodeInfo_UnmarshalJSON(t *testing.T) { + t.Run("invalid json", func(t *testing.T) { + var n netmap.NodeInfo + msg := []byte("definitely_not_protojson") + err := n.UnmarshalJSON(msg) + require.ErrorContains(t, err, "decode protojson") + }) + t.Run("invalid fields", func(t *testing.T) { + testCases := []struct { + name string + err string + json string + }{{name: "empty network endpoint", err: "empty network endpoint #1", json: ` +{ + "addresses": ["any", "", "any"] +}`}, + {name: "attributes/missing key", err: "invalid attribute #1: missing key", json: ` +{ + "attributes": [ + {"key": "key_valid","value": "any"}, + {"key": "","value": "any"} + ] +}`}, + {name: "attributes/repeated keys", err: "multiple attributes with key=k2", json: ` +{ + "attributes": [ + {"key": "k1","value": "any"}, + {"key": "k2","value": "1"}, + {"key": "k3","value": "any"}, + {"key": "k2","value": "2"} + ] +}`}, + {name: "attributes/missing value", err: "invalid attribute #1 (key2): missing value", json: ` +{ + "attributes": [ + {"key": "key1","value": "any"}, + {"key": 
"key2","value": ""} + ] +}`}, + {name: "attributes/price", err: "invalid price attribute (#1): invalid integer", json: ` +{ + "attributes": [ + {"key": "any","value": "any"}, + {"key": "Price","value": "not_a_number"} + ] +}`}, + {name: "attributes/capacity", err: "invalid capacity attribute (#1): invalid integer", json: ` +{ + "attributes": [ + {"key": "any","value": "any"}, + {"key": "Capacity","value": "not_a_number"} + ] +}`}, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + var n netmap.NodeInfo + require.ErrorContains(t, n.UnmarshalJSON([]byte(testCase.json)), testCase.err) + }) + } + }) +} + +func TestNodeInfo_SortAttributes(t *testing.T) { + var n netmap.NodeInfo + const a1, a2, a3 = "a1", "a2", "a3" + require.Less(t, a1, a2) + require.Less(t, a2, a3) + + // set unordered + n.SetAttribute(a3, a3) + n.SetAttribute(a1, a1) + n.SetAttribute(a2, a2) + + n.SortAttributes() + + b := n.Marshal() + var m apinetmap.NodeInfo + require.NoError(t, proto.Unmarshal(b, &m)) + n.WriteToV2(&m) + require.Equal(t, []*apinetmap.NodeInfo_Attribute{ + {Key: a1, Value: a1}, + {Key: a2, Value: a2}, + {Key: a3, Value: a3}, + }, m.Attributes) +} + +func collectNodeAttributes(n netmap.NodeInfo) [][2]string { + var res [][2]string + n.IterateAttributes(func(key, value string) { + res = append(res, [2]string{key, value}) + }) + return res +} + func TestNodeInfo_SetAttribute(t *testing.T) { - var n NodeInfo + var n netmap.NodeInfo + require.Panics(t, func() { n.SetAttribute("", "") }) + require.Panics(t, func() { n.SetAttribute("", "val") }) + require.Panics(t, func() { n.SetAttribute("key", "") }) + + const key1, val1 = "some_key1", "some_value1" + const key2, val2 = "some_key2", "some_value2" + + require.Zero(t, n.Attribute(key1)) + require.Zero(t, n.Attribute(key2)) + require.Zero(t, n.NumberOfAttributes()) + require.Zero(t, collectNodeAttributes(n)) + + n.SetAttribute(key1, val1) + n.SetAttribute(key2, val2) + require.Equal(t, val1, 
n.Attribute(key1)) + require.Equal(t, val2, n.Attribute(key2)) + require.EqualValues(t, 2, n.NumberOfAttributes()) + attrs := collectNodeAttributes(n) + require.Len(t, attrs, 2) + require.Contains(t, attrs, [2]string{key1, val1}) + require.Contains(t, attrs, [2]string{key2, val2}) + + n.SetAttribute(key1, val2) + n.SetAttribute(key2, val1) + require.Equal(t, val2, n.Attribute(key1)) + require.Equal(t, val1, n.Attribute(key2)) + attrs = collectNodeAttributes(n) + require.Len(t, attrs, 2) + require.Contains(t, attrs, [2]string{key1, val2}) + require.Contains(t, attrs, [2]string{key2, val1}) + + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst netmap.NodeInfo + + dst.SetAttribute(key1+key2, val1+val2) + + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.Zero(t, dst.Attribute(key1)) + require.Zero(t, dst.Attribute(key2)) + require.Zero(t, dst.NumberOfAttributes()) + require.Zero(t, collectNodeAttributes(dst)) + + src.SetAttribute(key1, val1) + src.SetAttribute(key2, val2) + + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.Equal(t, val1, dst.Attribute(key1)) + require.Equal(t, val2, dst.Attribute(key2)) + require.EqualValues(t, 2, dst.NumberOfAttributes()) + attrs := collectNodeAttributes(dst) + require.Len(t, attrs, 2) + require.Contains(t, attrs, [2]string{key1, val1}) + require.Contains(t, attrs, [2]string{key2, val2}) + }) + t.Run("api", func(t *testing.T) { + var src, dst netmap.NodeInfo + var msg apinetmap.NodeInfo + + // set required data just to satisfy decoder + src.SetPublicKey([]byte("any")) + src.SetNetworkEndpoints([]string{"any"}) + + dst.SetAttribute(key1, val1) - const key = "some key" - val := "some value" + src.WriteToV2(&msg) + require.Zero(t, msg.Attributes) + require.NoError(t, dst.ReadFromV2(&msg)) + require.Zero(t, dst.Attribute(key1)) + require.Zero(t, dst.Attribute(key2)) + require.Zero(t, dst.NumberOfAttributes()) + require.Zero(t, 
collectNodeAttributes(dst)) - require.Zero(t, n.Attribute(val)) + src.SetAttribute(key1, val1) + src.SetAttribute(key2, val2) - n.SetAttribute(key, val) - require.Equal(t, val, n.Attribute(key)) + src.WriteToV2(&msg) + require.Equal(t, []*apinetmap.NodeInfo_Attribute{ + {Key: key1, Value: val1}, + {Key: key2, Value: val2}, + }, msg.Attributes) - val = "some other value" + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + require.Equal(t, val1, dst.Attribute(key1)) + require.Equal(t, val2, dst.Attribute(key2)) + require.EqualValues(t, 2, dst.NumberOfAttributes()) + attrs := collectNodeAttributes(dst) + require.Len(t, attrs, 2) + require.Contains(t, attrs, [2]string{key1, val1}) + require.Contains(t, attrs, [2]string{key2, val2}) + }) + t.Run("json", func(t *testing.T) { + var src, dst netmap.NodeInfo - n.SetAttribute(key, val) - require.Equal(t, val, n.Attribute(key)) + dst.SetAttribute(key1, val1) + + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.Zero(t, dst.Attribute(key1)) + require.Zero(t, dst.Attribute(key2)) + require.Zero(t, dst.NumberOfAttributes()) + require.Zero(t, collectNodeAttributes(dst)) + + src.SetAttribute(key1, val1) + src.SetAttribute(key2, val2) + + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.Equal(t, val1, dst.Attribute(key1)) + require.Equal(t, val2, dst.Attribute(key2)) + require.EqualValues(t, 2, dst.NumberOfAttributes()) + attrs := collectNodeAttributes(dst) + require.Len(t, attrs, 2) + require.Contains(t, attrs, [2]string{key1, val1}) + require.Contains(t, attrs, [2]string{key2, val2}) + }) + }) } -func TestNodeInfo_Status(t *testing.T) { - var n NodeInfo +func testNodeInfoState(t *testing.T, get func(netmap.NodeInfo) bool, set func(*netmap.NodeInfo), apiVal apinetmap.NodeInfo_State) { + var n netmap.NodeInfo + require.False(t, get(n)) + set(&n) + require.True(t, get(n)) + + 
t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst netmap.NodeInfo + + set(&dst) + + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.False(t, get(dst)) + + set(&src) + + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.True(t, get(dst)) + }) + t.Run("api", func(t *testing.T) { + var src, dst netmap.NodeInfo + var msg apinetmap.NodeInfo + + // set required data just to satisfy decoder + src.SetPublicKey([]byte("any")) + src.SetNetworkEndpoints([]string{"any"}) - require.False(t, n.IsOnline()) - require.False(t, n.IsOffline()) - require.False(t, n.IsMaintenance()) + set(&dst) - n.SetOnline() - require.True(t, n.IsOnline()) - require.False(t, n.IsOffline()) - require.False(t, n.IsMaintenance()) + src.WriteToV2(&msg) + require.Zero(t, msg.State) + require.NoError(t, dst.ReadFromV2(&msg)) + require.False(t, get(dst)) - n.SetOffline() - require.True(t, n.IsOffline()) - require.False(t, n.IsOnline()) - require.False(t, n.IsMaintenance()) + set(&src) - n.SetMaintenance() - require.True(t, n.IsMaintenance()) - require.False(t, n.IsOnline()) - require.False(t, n.IsOffline()) + src.WriteToV2(&msg) + require.Equal(t, apiVal, msg.State) + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + require.True(t, get(dst)) + }) + t.Run("json", func(t *testing.T) { + var src, dst netmap.NodeInfo + + set(&dst) + + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.False(t, get(dst)) + + set(&src) + + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.True(t, get(dst)) + }) + }) } -func TestNodeInfo_ExternalAddr(t *testing.T) { - var n NodeInfo +func TestNodeInfoState(t *testing.T) { + t.Run("online", func(t *testing.T) { + testNodeInfoState(t, netmap.NodeInfo.IsOnline, (*netmap.NodeInfo).SetOnline, apinetmap.NodeInfo_ONLINE) + }) + t.Run("offline", func(t 
*testing.T) { + testNodeInfoState(t, netmap.NodeInfo.IsOffline, (*netmap.NodeInfo).SetOffline, apinetmap.NodeInfo_OFFLINE) + }) + t.Run("maintenance", func(t *testing.T) { + testNodeInfoState(t, netmap.NodeInfo.IsMaintenance, (*netmap.NodeInfo).SetMaintenance, apinetmap.NodeInfo_MAINTENANCE) + }) +} + +func TestNodeInfo_SetPublicKey(t *testing.T) { + var n netmap.NodeInfo + + require.Zero(t, n.PublicKey()) + + key := []byte("any_public_key") + n.SetPublicKey(key) + require.Equal(t, key, n.PublicKey()) - require.Empty(t, n.ExternalAddresses()) - require.Panics(t, func() { n.SetExternalAddresses() }) + keyOther := append(key, "_other"...) + n.SetPublicKey(keyOther) + require.Equal(t, keyOther, n.PublicKey()) - addr := []string{"1", "2", "3"} - n.SetExternalAddresses(addr[0]) - require.Equal(t, addr[:1], n.ExternalAddresses()) + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst netmap.NodeInfo - n.SetExternalAddresses(addr[1:]...) - require.Equal(t, addr[1:], n.ExternalAddresses()) + dst.SetPublicKey(keyOther) + + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.Zero(t, dst.PublicKey()) + + src.SetPublicKey(key) + + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.Equal(t, key, dst.PublicKey()) + }) + t.Run("api", func(t *testing.T) { + var src, dst netmap.NodeInfo + var msg apinetmap.NodeInfo + + // set required data just to satisfy decoder + src.SetNetworkEndpoints([]string{"any"}) + + src.SetPublicKey(key) + + src.WriteToV2(&msg) + require.Equal(t, key, msg.PublicKey) + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + require.Equal(t, key, dst.PublicKey()) + }) + t.Run("json", func(t *testing.T) { + var src, dst netmap.NodeInfo + + dst.SetPublicKey(keyOther) + + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.Zero(t, dst.PublicKey()) + + src.SetPublicKey(key) + + j, err = src.MarshalJSON() + 
require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.Equal(t, key, dst.PublicKey()) + }) + }) } -func TestNodeInfo_SetVerifiedNodesDomain(t *testing.T) { - const domain = "example.some-org.neofs" - var n NodeInfo +func TestNodeInfo_SetNetworkEndpoints(t *testing.T) { + var n netmap.NodeInfo - require.Zero(t, n.VerifiedNodesDomain()) + require.Zero(t, n.NetworkEndpoints()) - n.SetVerifiedNodesDomain(domain) - require.Equal(t, domain, n.VerifiedNodesDomain()) + endpoints := []string{"endpoint1", "endpoint2"} + n.SetNetworkEndpoints(endpoints) + require.Equal(t, endpoints, n.NetworkEndpoints()) - var msg netmap.NodeInfo - n.WriteToV2(&msg) + endpointsOther := []string{"endpoint3", "endpoint4", "endpoint5"} + n.SetNetworkEndpoints(endpointsOther) + require.Equal(t, endpointsOther, n.NetworkEndpoints()) - attrFound := false - msgAttrs := msg.GetAttributes() + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst netmap.NodeInfo - for i := range msgAttrs { - if msgAttrs[i].GetKey() == "VerifiedNodesDomain" { - require.False(t, attrFound) - attrFound = true - require.Equal(t, domain, msgAttrs[i].GetValue()) - } - } + dst.SetNetworkEndpoints(endpointsOther) + + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.Zero(t, dst.NetworkEndpoints()) + + src.SetNetworkEndpoints(endpoints) + + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.Equal(t, endpoints, dst.NetworkEndpoints()) + }) + t.Run("api", func(t *testing.T) { + var src, dst netmap.NodeInfo + var msg apinetmap.NodeInfo + + // set required data just to satisfy decoder + src.SetPublicKey([]byte("any")) + + src.SetNetworkEndpoints(endpoints) - require.True(t, attrFound) + src.WriteToV2(&msg) + require.Equal(t, endpoints, msg.Addresses) + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + require.Equal(t, endpoints, dst.NetworkEndpoints()) + }) + t.Run("json", func(t *testing.T) { + var src, dst 
netmap.NodeInfo + + dst.SetNetworkEndpoints(endpointsOther) + + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.Zero(t, dst.PublicKey()) + + src.SetNetworkEndpoints(endpoints) + + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.Equal(t, endpoints, dst.NetworkEndpoints()) + }) + }) +} + +func TestNodeInfo_SetExternalAddresses(t *testing.T) { + var n netmap.NodeInfo + + require.Zero(t, n.ExternalAddresses()) + require.Panics(t, func() { n.SetExternalAddresses(nil) }) + require.Panics(t, func() { n.SetExternalAddresses([]string{}) }) + + addrs := []string{"addr1", "addr2", "addr3"} + n.SetExternalAddresses(addrs) + require.Equal(t, addrs, n.ExternalAddresses()) + + addrsOther := []string{"addr4", "addr5"} + n.SetExternalAddresses(addrsOther) + require.Equal(t, addrsOther, n.ExternalAddresses()) + + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst netmap.NodeInfo + + dst.SetExternalAddresses(addrsOther) + + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.Zero(t, dst.ExternalAddresses()) + + src.SetExternalAddresses(addrs) + + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.Equal(t, addrs, dst.ExternalAddresses()) + }) + t.Run("api", func(t *testing.T) { + var src, dst netmap.NodeInfo + var msg apinetmap.NodeInfo + + // set required data just to satisfy decoder + src.SetPublicKey([]byte("any")) + src.SetNetworkEndpoints([]string{"any"}) + + dst.SetExternalAddresses(addrsOther) + + src.WriteToV2(&msg) + require.Zero(t, msg.Attributes) + require.NoError(t, dst.ReadFromV2(&msg)) + require.Zero(t, dst.ExternalAddresses()) + + src.SetExternalAddresses(addrs) + + src.WriteToV2(&msg) + require.Equal(t, []*apinetmap.NodeInfo_Attribute{ + {Key: "ExternalAddr", Value: "addr1,addr2,addr3"}, + }, msg.Attributes) + err := dst.ReadFromV2(&msg) + 
require.NoError(t, err) + require.Equal(t, addrs, dst.ExternalAddresses()) + }) + t.Run("json", func(t *testing.T) { + var src, dst netmap.NodeInfo + + dst.SetExternalAddresses(addrsOther) + + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.Zero(t, dst.ExternalAddresses()) + + src.SetExternalAddresses(addrs) + + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.Equal(t, addrs, dst.ExternalAddresses()) + }) + }) +} + +func testNodeAttribute[Type uint64 | string](t *testing.T, get func(netmap.NodeInfo) Type, set func(*netmap.NodeInfo, Type), apiAttr string, + rand func() (_ Type, api string)) { + var n netmap.NodeInfo + + require.Zero(t, get(n)) + + val, apiVal := rand() + set(&n, val) + require.EqualValues(t, val, get(n)) + + valOther, _ := rand() + set(&n, valOther) + require.EqualValues(t, valOther, get(n)) + + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst netmap.NodeInfo + + set(&dst, val) + + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.Zero(t, get(dst)) + + set(&src, val) + + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.EqualValues(t, val, get(dst)) + }) + t.Run("api", func(t *testing.T) { + var src, dst netmap.NodeInfo + var msg apinetmap.NodeInfo + + // set required data just to satisfy decoder + src.SetPublicKey([]byte("any")) + src.SetNetworkEndpoints([]string{"any"}) + + set(&dst, val) + + src.WriteToV2(&msg) + require.Zero(t, msg.Attributes) + require.NoError(t, dst.ReadFromV2(&msg)) + require.Zero(t, get(dst)) + + set(&src, val) + + src.WriteToV2(&msg) + require.Equal(t, []*apinetmap.NodeInfo_Attribute{ + {Key: apiAttr, Value: apiVal}, + }, msg.Attributes) + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + require.EqualValues(t, val, get(dst)) + }) + t.Run("json", func(t *testing.T) { + var src, dst 
netmap.NodeInfo + + set(&dst, val) + + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.Zero(t, dst.Version()) + + set(&src, val) + + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.EqualValues(t, val, get(dst)) + }) + }) +} + +func testNodeAttributeNum(t *testing.T, get func(netmap.NodeInfo) uint64, set func(*netmap.NodeInfo, uint64), apiAttr string) { + testNodeAttribute(t, get, set, apiAttr, func() (uint64, string) { + n := rand.Uint64() + return n, strconv.FormatUint(n, 10) + }) +} + +func testNodeAttributeString(t *testing.T, get func(netmap.NodeInfo) string, set func(*netmap.NodeInfo, string), apiAttr string) { + testNodeAttribute(t, get, set, apiAttr, func() (string, string) { + s := fmt.Sprintf("str_%d", rand.Uint64()) + return s, s + }) +} + +func TestNodeInfo_SetCapacity(t *testing.T) { + testNodeAttributeNum(t, netmap.NodeInfo.Capacity, (*netmap.NodeInfo).SetCapacity, "Capacity") +} + +func TestNodeInfo_SetPrice(t *testing.T) { + testNodeAttributeNum(t, netmap.NodeInfo.Price, (*netmap.NodeInfo).SetPrice, "Price") +} + +func TestNodeInfo_SetVersion(t *testing.T) { + testNodeAttributeString(t, netmap.NodeInfo.Version, (*netmap.NodeInfo).SetVersion, "Version") +} + +func TestNodeInfo_SetLOCODE(t *testing.T) { + testNodeAttributeString(t, netmap.NodeInfo.LOCODE, (*netmap.NodeInfo).SetLOCODE, "UN-LOCODE") +} + +func TestNodeInfo_SetCountryCode(t *testing.T) { + testNodeAttributeString(t, netmap.NodeInfo.CountryCode, (*netmap.NodeInfo).SetCountryCode, "CountryCode") +} + +func TestNodeInfo_SetCountryName(t *testing.T) { + testNodeAttributeString(t, netmap.NodeInfo.CountryName, (*netmap.NodeInfo).SetCountryName, "Country") +} + +func TestNodeInfo_SetLocationName(t *testing.T) { + testNodeAttributeString(t, netmap.NodeInfo.LocationName, (*netmap.NodeInfo).SetLocationName, "Location") +} + +func 
TestNodeInfo_SetSubdivisionCode(t *testing.T) { + testNodeAttributeString(t, netmap.NodeInfo.SubdivisionCode, (*netmap.NodeInfo).SetSubdivisionCode, "SubDivCode") +} + +func TestNodeInfo_SetSubdivisionName(t *testing.T) { + testNodeAttributeString(t, netmap.NodeInfo.SubdivisionName, (*netmap.NodeInfo).SetSubdivisionName, "SubDiv") +} + +func TestNodeInfo_SetContinentName(t *testing.T) { + testNodeAttributeString(t, netmap.NodeInfo.ContinentName, (*netmap.NodeInfo).SetContinentName, "Continent") +} + +func TestNodeInfo_SetVerifiedNodesDomain(t *testing.T) { + testNodeAttributeString(t, netmap.NodeInfo.VerifiedNodesDomain, (*netmap.NodeInfo).SetVerifiedNodesDomain, "VerifiedNodesDomain") } diff --git a/netmap/policy.go b/netmap/policy.go index 4275c522c..541be1182 100644 --- a/netmap/policy.go +++ b/netmap/policy.go @@ -8,26 +8,28 @@ import ( "strings" "github.com/antlr/antlr4/runtime/Go/antlr/v4" - "github.com/nspcc-dev/neofs-api-go/v2/netmap" + "github.com/nspcc-dev/neofs-sdk-go/api/netmap" "github.com/nspcc-dev/neofs-sdk-go/netmap/parser" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" ) // PlacementPolicy declares policy to store objects in the NeoFS container. // Within itself, PlacementPolicy represents a set of rules to select a subset // of nodes from NeoFS network map - node-candidates for object storage. // -// PlacementPolicy is mutually compatible with github.com/nspcc-dev/neofs-api-go/v2/netmap.PlacementPolicy -// message. See ReadFromV2 / WriteToV2 methods. +// PlacementPolicy is mutually compatible with [netmap.PlacementPolicy] message. +// See [PlacementPolicy.ReadFromV2] / [PlacementPolicy.WriteToV2] methods. // // Instances can be created using built-in var declaration. 
type PlacementPolicy struct { backupFactor uint32 - filters []netmap.Filter + filters []Filter - selectors []netmap.Selector + selectors []Selector - replicas []netmap.Replica + replicas []ReplicaDescriptor } // FilterOp defines the matching property. @@ -70,148 +72,217 @@ func (x FilterOp) String() string { } } -func copyFilter(f netmap.Filter) netmap.Filter { - var filter netmap.Filter +func copyFilter(f Filter) Filter { + res := f - filter.SetName(f.GetName()) - filter.SetKey(f.GetKey()) - filter.SetOp(f.GetOp()) - filter.SetValue(f.GetValue()) - - if f.GetFilters() != nil { - filters := make([]netmap.Filter, len(f.GetFilters())) - - for i, internalFilter := range f.GetFilters() { - filters[i] = copyFilter(internalFilter) + if f.subs != nil { + res.subs = make([]Filter, len(f.subs)) + for i := range f.subs { + res.subs[i] = copyFilter(f.subs[i]) } - - filter.SetFilters(filters) } else { - filter.SetFilters(nil) + res.subs = nil } - return filter + return res } // CopyTo writes deep copy of the [PlacementPolicy] to dst. func (p PlacementPolicy) CopyTo(dst *PlacementPolicy) { dst.SetContainerBackupFactor(p.backupFactor) - dst.filters = make([]netmap.Filter, len(p.filters)) - for i, f := range p.filters { - dst.filters[i] = copyFilter(f) + if p.filters != nil { + dst.filters = make([]Filter, len(p.filters)) + for i, f := range p.filters { + dst.filters[i] = copyFilter(f) + } + } else { + dst.filters = nil } - // netmap.Selector is a struct with simple types, no links inside. Just create a new slice and copy all items inside. - dst.selectors = make([]netmap.Selector, len(p.selectors)) - copy(dst.selectors, p.selectors) + if p.selectors != nil { + dst.selectors = make([]Selector, len(p.selectors)) + copy(dst.selectors, p.selectors) + } else { + dst.selectors = nil + } - // netmap.Replica is a struct with simple types, no links inside. Just create a new slice and copy all items inside. 
- dst.replicas = make([]netmap.Replica, len(p.replicas)) - copy(dst.replicas, p.replicas) + if p.replicas != nil { + dst.replicas = make([]ReplicaDescriptor, len(p.replicas)) + copy(dst.replicas, p.replicas) + } else { + dst.replicas = nil + } } -func (p *PlacementPolicy) readFromV2(m netmap.PlacementPolicy, checkFieldPresence bool) error { - p.replicas = m.GetReplicas() - if checkFieldPresence && len(p.replicas) == 0 { +func (p *PlacementPolicy) readFromV2(m *netmap.PlacementPolicy, checkFieldPresence bool) error { + if checkFieldPresence && len(m.Replicas) == 0 { return errors.New("missing replicas") } - p.backupFactor = m.GetContainerBackupFactor() - p.selectors = m.GetSelectors() - p.filters = m.GetFilters() + if m.Replicas != nil { + p.replicas = make([]ReplicaDescriptor, len(m.Replicas)) + for i := range m.Replicas { + p.replicas[i] = replicaFromAPI(m.Replicas[i]) + } + } else { + p.replicas = nil + } + + if m.Selectors != nil { + p.selectors = make([]Selector, len(m.Selectors)) + for i := range m.Selectors { + p.selectors[i] = selectorFromAPI(m.Selectors[i]) + } + } else { + p.selectors = nil + } + + p.filters = filtersFromAPI(m.Filters) + p.backupFactor = m.ContainerBackupFactor return nil } // Marshal encodes PlacementPolicy into a binary format of the NeoFS API -// protocol (Protocol Buffers with direct field order). +// protocol (Protocol Buffers V3 with direct field order). // -// See also Unmarshal. +// See also [PlacementPolicy.Unmarshal]. func (p PlacementPolicy) Marshal() []byte { var m netmap.PlacementPolicy p.WriteToV2(&m) - - return m.StableMarshal(nil) + b := make([]byte, m.MarshaledSize()) + m.MarshalStable(b) + return b } -// Unmarshal decodes NeoFS API protocol binary format into the PlacementPolicy -// (Protocol Buffers with direct field order). Returns an error describing -// a format violation. +// Unmarshal decodes Protocol Buffers V3 binary data into the PlacementPolicy. 
+// Returns an error describing a format violation of the specified fields. +// Unmarshal does not check presence of the required fields and, at the same +// time, checks format of presented fields. // -// See also Marshal. +// See also [PlacementPolicy.Marshal]. func (p *PlacementPolicy) Unmarshal(data []byte) error { var m netmap.PlacementPolicy - - err := m.Unmarshal(data) + err := proto.Unmarshal(data, &m) if err != nil { - return err + return fmt.Errorf("decode protobuf: %w", err) } - return p.readFromV2(m, false) + return p.readFromV2(&m, false) } // MarshalJSON encodes PlacementPolicy into a JSON format of the NeoFS API -// protocol (Protocol Buffers JSON). +// protocol (Protocol Buffers V3 JSON). // -// See also UnmarshalJSON. +// See also [PlacementPolicy.UnmarshalJSON]. func (p PlacementPolicy) MarshalJSON() ([]byte, error) { var m netmap.PlacementPolicy p.WriteToV2(&m) - return m.MarshalJSON() + return protojson.Marshal(&m) } -// UnmarshalJSON decodes NeoFS API protocol JSON format into the PlacementPolicy -// (Protocol Buffers JSON). Returns an error describing a format violation. +// UnmarshalJSON decodes NeoFS API protocol JSON data into the PlacementPolicy +// (Protocol Buffers V3 JSON). Returns an error describing a format violation. +// UnmarshalJSON does not check presence of the required fields and, at the same +// time, checks format of presented fields. // -// See also MarshalJSON. +// See also [PlacementPolicy.MarshalJSON]. func (p *PlacementPolicy) UnmarshalJSON(data []byte) error { var m netmap.PlacementPolicy - - err := m.UnmarshalJSON(data) + err := protojson.Unmarshal(data, &m) if err != nil { - return err + return fmt.Errorf("decode protojson: %w", err) } - return p.readFromV2(m, false) + return p.readFromV2(&m, false) } -// ReadFromV2 reads PlacementPolicy from the netmap.PlacementPolicy message. -// Checks if the message conforms to NeoFS API V2 protocol. +// ReadFromV2 reads PlacementPolicy from the [netmap.PlacementPolicy] message. 
+// Returns an error if the message is malformed according to the NeoFS API V2 +// protocol. The message must not be nil. // -// See also WriteToV2. -func (p *PlacementPolicy) ReadFromV2(m netmap.PlacementPolicy) error { +// ReadFromV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. +// +// See also [PlacementPolicy.WriteToV2]. +func (p *PlacementPolicy) ReadFromV2(m *netmap.PlacementPolicy) error { return p.readFromV2(m, true) } -// WriteToV2 writes PlacementPolicy to the session.Token message. -// The message must not be nil. +// WriteToV2 writes PlacementPolicy to the [netmap.PlacementPolicy] message of +// the NeoFS API protocol. +// +// WriteToV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. // -// See also ReadFromV2. +// See also [PlacementPolicy.ReadFromV2]. func (p PlacementPolicy) WriteToV2(m *netmap.PlacementPolicy) { - m.SetContainerBackupFactor(p.backupFactor) - m.SetFilters(p.filters) - m.SetSelectors(p.selectors) - m.SetReplicas(p.replicas) + if p.replicas != nil { + m.Replicas = make([]*netmap.Replica, len(p.replicas)) + for i := range p.replicas { + m.Replicas[i] = replicaToAPI(p.replicas[i]) + } + } else { + m.Replicas = nil + } + + if p.selectors != nil { + m.Selectors = make([]*netmap.Selector, len(p.selectors)) + for i := range p.selectors { + m.Selectors[i] = selectorToAPI(p.selectors[i]) + } + } else { + m.Selectors = nil + } + + m.Filters = filtersToAPI(p.filters) + m.ContainerBackupFactor = p.backupFactor } // ReplicaDescriptor replica descriptor characterizes replicas of objects from // the subset selected by a particular Selector. 
type ReplicaDescriptor struct { - m netmap.Replica + count uint32 + selector string +} + +func replicaFromAPI(m *netmap.Replica) ReplicaDescriptor { + var res ReplicaDescriptor + if m != nil { + res.count = m.Count + res.selector = m.Selector + } + return res +} + +func isEmptyReplica(r ReplicaDescriptor) bool { + return r.count == 0 && r.selector == "" +} + +func replicaToAPI(r ReplicaDescriptor) *netmap.Replica { + if isEmptyReplica(r) { + return nil + } + return &netmap.Replica{ + Count: r.count, + Selector: r.selector, + } } // SetNumberOfObjects sets number of object replicas. +// +// See also [ReplicaDescriptor.NumberOfObjects]. func (r *ReplicaDescriptor) SetNumberOfObjects(c uint32) { - r.m.SetCount(c) + r.count = c } -// NumberOfObjects returns number set using SetNumberOfObjects. +// NumberOfObjects returns number set using [ReplicaDescriptor.SetNumberOfObjects]. // // Zero ReplicaDescriptor has zero number of objects. func (r ReplicaDescriptor) NumberOfObjects() uint32 { - return r.m.GetCount() + return r.count } // SetSelectorName sets name of the related Selector. @@ -221,7 +292,7 @@ func (r ReplicaDescriptor) NumberOfObjects() uint32 { // // See also [ReplicaDescriptor.SelectorName]. func (r *ReplicaDescriptor) SetSelectorName(s string) { - r.m.SetSelector(s) + r.selector = s } // SelectorName returns name of the related Selector. @@ -231,7 +302,7 @@ func (r *ReplicaDescriptor) SetSelectorName(s string) { // // See also [ReplicaDescriptor.SetSelectorName]. func (r ReplicaDescriptor) SelectorName() string { - return r.m.GetSelector() + return r.selector } // SetReplicas sets list of object replica's characteristics. @@ -239,11 +310,7 @@ func (r ReplicaDescriptor) SelectorName() string { // See also [PlacementPolicy.Replicas], [PlacementPolicy.NumberOfReplicas], // [PlacementPolicy.ReplicaNumberByIndex]. 
func (p *PlacementPolicy) SetReplicas(rs []ReplicaDescriptor) { - p.replicas = make([]netmap.Replica, len(rs)) - - for i := range rs { - p.replicas[i] = rs[i].m - } + p.replicas = rs } // Replicas returns list of object replica characteristics. @@ -251,27 +318,28 @@ func (p *PlacementPolicy) SetReplicas(rs []ReplicaDescriptor) { // See also [PlacementPolicy.SetReplicas], [PlacementPolicy.NumberOfReplicas], // [PlacementPolicy.ReplicaNumberByIndex]. func (p PlacementPolicy) Replicas() []ReplicaDescriptor { - rs := make([]ReplicaDescriptor, len(p.replicas)) - for i := range p.replicas { - rs[i].m = p.replicas[i] - } - return rs + return p.replicas } // NumberOfReplicas returns number of replica descriptors set using SetReplicas. // // Zero PlacementPolicy has no replicas which is incorrect according to the // NeoFS API protocol. +// +// See also [PlacementPolicy.SetReplicas], [PlacementPolicy.ReplicaNumberByIndex], +// [PlacementPolicy.Replicas]. func (p PlacementPolicy) NumberOfReplicas() int { return len(p.replicas) } // ReplicaNumberByIndex returns number of object replicas from the i-th replica -// descriptor. Index MUST be in range [0; NumberOfReplicas()). +// descriptor. Index MUST be in range 0:[PlacementPolicy.NumberOfReplicas]. // // Zero PlacementPolicy has no replicas. +// +// See also [PlacementPolicy.SetReplicas], [PlacementPolicy.Replicas]. func (p PlacementPolicy) ReplicaNumberByIndex(i int) uint32 { - return p.replicas[i].GetCount() + return p.replicas[i].count } // SetContainerBackupFactor sets container backup factor: it controls how deep @@ -297,23 +365,58 @@ func (p *PlacementPolicy) ContainerBackupFactor() uint32 { // Selector describes the bucket selection operator: choose a number of nodes // from the bucket taking the nearest nodes to the related container by hash distance. 
type Selector struct { - m netmap.Selector + name string + count uint32 + clause netmap.Clause + attr string + filter string +} + +func selectorFromAPI(m *netmap.Selector) Selector { + var res Selector + if m != nil { + res.name = m.Name + res.count = m.Count + res.clause = m.Clause + res.attr = m.Attribute + res.filter = m.Filter + } + return res +} + +func isEmptySelector(r Selector) bool { + return r.count == 0 && r.name == "" && r.clause == 0 && r.filter == "" && r.attr == "" +} + +func selectorToAPI(s Selector) *netmap.Selector { + if isEmptySelector(s) { + return nil + } + return &netmap.Selector{ + Name: s.name, + Count: s.count, + Clause: s.clause, + Attribute: s.attr, + Filter: s.filter, + } } // SetName sets name with which the Selector can be referenced. // // Zero Selector is unnamed. +// +// See also [Selector.Name]. func (s *Selector) SetName(name string) { - s.m.SetName(name) + s.name = name } // Name returns name with which the Selector can be referenced. // // Zero Selector is unnamed. // -// See also [Selector.Name]. +// See also [Selector.SetName]. func (s Selector) Name() string { - return s.m.GetName() + return s.name } // SetNumberOfNodes sets number of nodes to select from the bucket. @@ -322,7 +425,7 @@ func (s Selector) Name() string { // // See also [Selector.NumberOfNodes]. func (s *Selector) SetNumberOfNodes(num uint32) { - s.m.SetCount(num) + s.count = num } // NumberOfNodes returns number of nodes to select from the bucket. @@ -331,7 +434,7 @@ func (s *Selector) SetNumberOfNodes(num uint32) { // // See also [Selector.SetNumberOfNodes]. func (s Selector) NumberOfNodes() uint32 { - return s.m.GetCount() + return s.count } // SelectByBucketAttribute sets attribute of the bucket to select nodes from. @@ -340,7 +443,7 @@ func (s Selector) NumberOfNodes() uint32 { // // See also [Selector.BucketAttribute]. 
func (s *Selector) SelectByBucketAttribute(bucket string) { - s.m.SetAttribute(bucket) + s.attr = bucket } // BucketAttribute returns attribute of the bucket to select nodes from. @@ -349,7 +452,7 @@ func (s *Selector) SelectByBucketAttribute(bucket string) { // // See also [Selector.SelectByBucketAttribute]. func (s *Selector) BucketAttribute() string { - return s.m.GetAttribute() + return s.attr } // SelectSame makes selection algorithm to select only nodes having the same values @@ -359,7 +462,7 @@ func (s *Selector) BucketAttribute() string { // // See also [Selector.SelectByBucketAttribute], [Selector.IsSame]. func (s *Selector) SelectSame() { - s.m.SetClause(netmap.Same) + s.clause = netmap.Clause_SAME } // IsSame checks whether selection algorithm is set to select only nodes having @@ -367,7 +470,7 @@ func (s *Selector) SelectSame() { // // See also [Selector.SelectSame]. func (s *Selector) IsSame() bool { - return s.m.GetClause() == netmap.Same + return s.clause == netmap.Clause_SAME } // SelectDistinct makes selection algorithm to select only nodes having the different values @@ -377,7 +480,7 @@ func (s *Selector) IsSame() bool { // // See also [Selector.SelectByBucketAttribute], [Selector.IsDistinct]. func (s *Selector) SelectDistinct() { - s.m.SetClause(netmap.Distinct) + s.clause = netmap.Clause_DISTINCT } // IsDistinct checks whether selection algorithm is set to select only nodes @@ -385,16 +488,16 @@ func (s *Selector) SelectDistinct() { // // See also [Selector.SelectByBucketAttribute], [Selector.SelectDistinct]. func (s *Selector) IsDistinct() bool { - return s.m.GetClause() == netmap.Distinct + return s.clause == netmap.Clause_DISTINCT } // SetFilterName sets reference to pre-filtering nodes for selection. // // Zero Selector has no filtering reference. // -// See also Filter.SetName. +// See also [Selector.FilterName]. 
func (s *Selector) SetFilterName(f string) { - s.m.SetFilter(f) + s.filter = f } // FilterName returns reference to pre-filtering nodes for selection. @@ -403,7 +506,7 @@ func (s *Selector) SetFilterName(f string) { // // See also [Filter.SetName], [Selector.SetFilterName]. func (s *Selector) FilterName() string { - return s.m.GetFilter() + return s.filter } // SetSelectors sets list of Selector to form the subset of the nodes to store @@ -413,11 +516,7 @@ func (s *Selector) FilterName() string { // // See also [PlacementPolicy.Selectors]. func (p *PlacementPolicy) SetSelectors(ss []Selector) { - p.selectors = make([]netmap.Selector, len(ss)) - - for i := range ss { - p.selectors[i] = ss[i].m - } + p.selectors = ss } // Selectors returns list of Selector to form the subset of the nodes to store @@ -427,16 +526,64 @@ func (p *PlacementPolicy) SetSelectors(ss []Selector) { // // See also [PlacementPolicy.SetSelectors]. func (p PlacementPolicy) Selectors() []Selector { - ss := make([]Selector, len(p.selectors)) - for i := range p.selectors { - ss[i].m = p.selectors[i] - } - return ss + return p.selectors } // Filter contains rules for filtering the node sets. 
type Filter struct { - m netmap.Filter + name string + key string + op FilterOp + val string + subs []Filter +} + +func filterFromAPI(f *netmap.Filter) Filter { + var res Filter + if f != nil { + res.name = f.Name + res.key = f.Key + res.op = FilterOp(f.Op) + res.val = f.Value + if len(f.Filters) > 0 { + res.subs = filtersFromAPI(f.Filters) + } + } + return res +} + +func filtersFromAPI(fs []*netmap.Filter) []Filter { + if fs == nil { + return nil + } + res := make([]Filter, len(fs)) + for i := range fs { + res[i] = filterFromAPI(fs[i]) + } + return res +} + +func isEmptyFilter(f Filter) bool { + return f.op == 0 && f.name == "" && f.key == "" && f.val == "" && len(f.subs) == 0 +} + +func filtersToAPI(fs []Filter) []*netmap.Filter { + if fs == nil { + return nil + } + res := make([]*netmap.Filter, len(fs)) + for i := range fs { + if !isEmptyFilter(fs[i]) { + res[i] = &netmap.Filter{ + Name: fs[i].name, + Key: fs[i].key, + Op: netmap.Operation(fs[i].op), + Value: fs[i].val, + Filters: filtersToAPI(fs[i].subs), + } + } + } + return res } // SetName sets name with which the Filter can be referenced or, for inner filters, @@ -447,7 +594,7 @@ type Filter struct { // // See also [Filter.Name]. func (x *Filter) SetName(name string) { - x.m.SetName(name) + x.name = name } // Name returns name with which the Filter can be referenced or, for inner @@ -458,57 +605,47 @@ func (x *Filter) SetName(name string) { // // See also [Filter.SetName]. func (x Filter) Name() string { - return x.m.GetName() + return x.name } // Key returns key to the property. func (x Filter) Key() string { - return x.m.GetKey() + return x.key } // Op returns operator to match the property. func (x Filter) Op() FilterOp { - return FilterOp(x.m.GetOp()) + return x.op } // Value returns value to check the property against. func (x Filter) Value() string { - return x.m.GetValue() + return x.val } // SubFilters returns list of sub-filters when Filter is complex. 
func (x Filter) SubFilters() []Filter { - fsm := x.m.GetFilters() - if len(fsm) == 0 { - return nil - } - - fs := make([]Filter, len(fsm)) - for i := range fsm { - fs[i] = Filter{m: fsm[i]} - } - - return fs + return x.subs } -func (x *Filter) setAttribute(key string, op netmap.Operation, val string) { - x.m.SetKey(key) - x.m.SetOp(op) - x.m.SetValue(val) +func (x *Filter) setAttribute(key string, op FilterOp, val string) { + x.key = key + x.op = op + x.val = val } // Equal applies the rule to accept only nodes with the same attribute value. // // Method SHOULD NOT be called along with other similar methods. func (x *Filter) Equal(key, value string) { - x.setAttribute(key, netmap.EQ, value) + x.setAttribute(key, FilterOpEQ, value) } // NotEqual applies the rule to accept only nodes with the distinct attribute value. // // Method SHOULD NOT be called along with other similar methods. func (x *Filter) NotEqual(key, value string) { - x.setAttribute(key, netmap.NE, value) + x.setAttribute(key, FilterOpNE, value) } // NumericGT applies the rule to accept only nodes with the numeric attribute @@ -516,7 +653,7 @@ func (x *Filter) NotEqual(key, value string) { // // Method SHOULD NOT be called along with other similar methods. func (x *Filter) NumericGT(key string, num int64) { - x.setAttribute(key, netmap.GT, strconv.FormatInt(num, 10)) + x.setAttribute(key, FilterOpGT, strconv.FormatInt(num, 10)) } // NumericGE applies the rule to accept only nodes with the numeric attribute @@ -524,7 +661,7 @@ func (x *Filter) NumericGT(key string, num int64) { // // Method SHOULD NOT be called along with other similar methods. 
func (x *Filter) NumericGE(key string, num int64) { - x.setAttribute(key, netmap.GE, strconv.FormatInt(num, 10)) + x.setAttribute(key, FilterOpGE, strconv.FormatInt(num, 10)) } // NumericLT applies the rule to accept only nodes with the numeric attribute @@ -532,7 +669,7 @@ func (x *Filter) NumericGE(key string, num int64) { // // Method SHOULD NOT be called along with other similar methods. func (x *Filter) NumericLT(key string, num int64) { - x.setAttribute(key, netmap.LT, strconv.FormatInt(num, 10)) + x.setAttribute(key, FilterOpLT, strconv.FormatInt(num, 10)) } // NumericLE applies the rule to accept only nodes with the numeric attribute @@ -540,22 +677,12 @@ func (x *Filter) NumericLT(key string, num int64) { // // Method SHOULD NOT be called along with other similar methods. func (x *Filter) NumericLE(key string, num int64) { - x.setAttribute(key, netmap.LE, strconv.FormatInt(num, 10)) + x.setAttribute(key, FilterOpLE, strconv.FormatInt(num, 10)) } -func (x *Filter) setInnerFilters(op netmap.Operation, filters []Filter) { +func (x *Filter) setInnerFilters(op FilterOp, filters []Filter) { x.setAttribute("", op, "") - - inner := x.m.GetFilters() - if rem := len(filters) - len(inner); rem > 0 { - inner = append(inner, make([]netmap.Filter, rem)...) - } - - for i := range filters { - inner[i] = filters[i].m - } - - x.m.SetFilters(inner) + x.subs = filters } // LogicalOR applies the rule to accept only nodes which satisfy at least one @@ -563,7 +690,7 @@ func (x *Filter) setInnerFilters(op netmap.Operation, filters []Filter) { // // Method SHOULD NOT be called along with other similar methods. func (x *Filter) LogicalOR(filters ...Filter) { - x.setInnerFilters(netmap.OR, filters) + x.setInnerFilters(FilterOpOR, filters) } // LogicalAND applies the rule to accept only nodes which satisfy all the given @@ -571,7 +698,7 @@ func (x *Filter) LogicalOR(filters ...Filter) { // // Method SHOULD NOT be called along with other similar methods. 
func (x *Filter) LogicalAND(filters ...Filter) { - x.setInnerFilters(netmap.AND, filters) + x.setInnerFilters(FilterOpAND, filters) } // Filters returns list of Filter that will be applied when selecting nodes. @@ -580,11 +707,7 @@ func (x *Filter) LogicalAND(filters ...Filter) { // // See also [PlacementPolicy.SetFilters]. func (p PlacementPolicy) Filters() []Filter { - fs := make([]Filter, len(p.filters)) - for i := range p.filters { - fs[i] = Filter{m: p.filters[i]} - } - return fs + return p.filters } // SetFilters sets list of Filter that will be applied when selecting nodes. @@ -593,11 +716,7 @@ func (p PlacementPolicy) Filters() []Filter { // // See also [PlacementPolicy.Filters]. func (p *PlacementPolicy) SetFilters(fs []Filter) { - p.filters = make([]netmap.Filter, len(fs)) - - for i := range fs { - p.filters[i] = fs[i].m - } + p.filters = fs } // WriteStringTo encodes PlacementPolicy into human-readably query and writes @@ -624,13 +743,10 @@ func (p PlacementPolicy) WriteStringTo(w io.StringWriter) (err error) { return err } - c := p.replicas[i].GetCount() - s := p.replicas[i].GetSelector() - - if s != "" { - _, err = w.WriteString(fmt.Sprintf("REP %d IN %s", c, s)) + if p.replicas[i].selector != "" { + _, err = w.WriteString(fmt.Sprintf("REP %d IN %s", p.replicas[i].count, p.replicas[i].selector)) } else { - _, err = w.WriteString(fmt.Sprintf("REP %d", c)) + _, err = w.WriteString(fmt.Sprintf("REP %d", p.replicas[i].count)) } if err != nil { @@ -650,46 +766,44 @@ func (p PlacementPolicy) WriteStringTo(w io.StringWriter) (err error) { } } - var s string - for i := range p.selectors { err = writeLnIfNeeded() if err != nil { return err } - _, err = w.WriteString(fmt.Sprintf("SELECT %d", p.selectors[i].GetCount())) + _, err = w.WriteString(fmt.Sprintf("SELECT %d", p.selectors[i].count)) if err != nil { return err } - if s = p.selectors[i].GetAttribute(); s != "" { + if p.selectors[i].attr != "" { var clause string - switch p.selectors[i].GetClause() { - case 
netmap.Same: + switch p.selectors[i].clause { + case netmap.Clause_SAME: clause = "SAME " - case netmap.Distinct: + case netmap.Clause_DISTINCT: clause = "DISTINCT " default: clause = "" } - _, err = w.WriteString(fmt.Sprintf(" IN %s%s", clause, s)) + _, err = w.WriteString(fmt.Sprintf(" IN %s%s", clause, p.selectors[i].attr)) if err != nil { return err } } - if s = p.selectors[i].GetFilter(); s != "" { - _, err = w.WriteString(" FROM " + s) + if p.selectors[i].filter != "" { + _, err = w.WriteString(" FROM " + p.selectors[i].filter) if err != nil { return err } } - if s = p.selectors[i].GetName(); s != "" { - _, err = w.WriteString(" AS " + s) + if p.selectors[i].name != "" { + _, err = w.WriteString(" AS " + p.selectors[i].name) if err != nil { return err } @@ -716,41 +830,38 @@ func (p PlacementPolicy) WriteStringTo(w io.StringWriter) (err error) { return nil } -func writeFilterStringTo(w io.StringWriter, f netmap.Filter) error { +func writeFilterStringTo(w io.StringWriter, f Filter) error { var err error - var s string - op := f.GetOp() - unspecified := op == 0 + unspecified := f.op == 0 - if s = f.GetKey(); s != "" { - _, err = w.WriteString(fmt.Sprintf("%s %s %s", s, op, f.GetValue())) + if f.key != "" { + _, err = w.WriteString(fmt.Sprintf("%s %s %s", f.key, f.op, f.val)) if err != nil { return err } - } else if s = f.GetName(); unspecified && s != "" { - _, err = w.WriteString(fmt.Sprintf("@%s", s)) + } else if unspecified && f.name != "" { + _, err = w.WriteString(fmt.Sprintf("@%s", f.name)) if err != nil { return err } } - inner := f.GetFilters() - for i := range inner { + for i := range f.subs { if i != 0 { - _, err = w.WriteString(" " + op.String() + " ") + _, err = w.WriteString(" " + f.op.String() + " ") if err != nil { return err } } - err = writeFilterStringTo(w, inner[i]) + err = writeFilterStringTo(w, f.subs[i]) if err != nil { return err } } - if s = f.GetName(); s != "" && !unspecified { - _, err = w.WriteString(" AS " + s) + if f.name != "" && 
!unspecified { + _, err = w.WriteString(" AS " + f.name) if err != nil { return err } @@ -829,7 +940,7 @@ func (p *policyVisitor) VisitPolicy(ctx *parser.PolicyContext) any { pl := new(PlacementPolicy) repStmts := ctx.AllRepStmt() - pl.replicas = make([]netmap.Replica, 0, len(repStmts)) + pl.replicas = make([]ReplicaDescriptor, 0, len(repStmts)) for _, r := range repStmts { res, ok := r.Accept(p).(*netmap.Replica) @@ -837,7 +948,7 @@ func (p *policyVisitor) VisitPolicy(ctx *parser.PolicyContext) any { return nil } - pl.replicas = append(pl.replicas, *res) + pl.replicas = append(pl.replicas, replicaFromAPI(res)) } if cbfStmt := ctx.CbfStmt(); cbfStmt != nil { @@ -849,7 +960,7 @@ func (p *policyVisitor) VisitPolicy(ctx *parser.PolicyContext) any { } selStmts := ctx.AllSelectStmt() - pl.selectors = make([]netmap.Selector, 0, len(selStmts)) + pl.selectors = make([]Selector, 0, len(selStmts)) for _, s := range selStmts { res, ok := s.Accept(p).(*netmap.Selector) @@ -857,14 +968,14 @@ func (p *policyVisitor) VisitPolicy(ctx *parser.PolicyContext) any { return nil } - pl.selectors = append(pl.selectors, *res) + pl.selectors = append(pl.selectors, selectorFromAPI(res)) } filtStmts := ctx.AllFilterStmt() - pl.filters = make([]netmap.Filter, 0, len(filtStmts)) + pl.filters = make([]Filter, 0, len(filtStmts)) for _, f := range filtStmts { - pl.filters = append(pl.filters, *f.Accept(p).(*netmap.Filter)) + pl.filters = append(pl.filters, filterFromAPI(f.Accept(p).(*netmap.Filter))) } return pl @@ -887,10 +998,10 @@ func (p *policyVisitor) VisitRepStmt(ctx *parser.RepStmtContext) any { } rs := new(netmap.Replica) - rs.SetCount(uint32(num)) + rs.Count = uint32(num) if sel := ctx.GetSelector(); sel != nil { - rs.SetSelector(sel.GetText()) + rs.Selector = sel.GetText() } return rs @@ -904,20 +1015,20 @@ func (p *policyVisitor) VisitSelectStmt(ctx *parser.SelectStmtContext) any { } s := new(netmap.Selector) - s.SetCount(uint32(res)) + s.Count = uint32(res) if clStmt := ctx.Clause(); 
clStmt != nil { - s.SetClause(clauseFromString(clStmt.GetText())) + s.Clause = clauseFromString(clStmt.GetText()) } if bStmt := ctx.GetBucket(); bStmt != nil { - s.SetAttribute(ctx.GetBucket().GetText()) + s.Attribute = ctx.GetBucket().GetText() } - s.SetFilter(ctx.GetFilter().GetText()) // either ident or wildcard + s.Filter = ctx.GetFilter().GetText() // either ident or wildcard if ctx.AS() != nil { - s.SetName(ctx.GetName().GetText()) + s.Name = ctx.GetName().GetText() } return s } @@ -925,7 +1036,7 @@ func (p *policyVisitor) VisitSelectStmt(ctx *parser.SelectStmtContext) any { // VisitFilterStmt implements parser.QueryVisitor interface. func (p *policyVisitor) VisitFilterStmt(ctx *parser.FilterStmtContext) any { f := p.VisitFilterExpr(ctx.GetExpr().(*parser.FilterExprContext)).(*netmap.Filter) - f.SetName(ctx.GetName().GetText()) + f.Name = ctx.GetName().GetText() return f } @@ -940,19 +1051,19 @@ func (p *policyVisitor) VisitFilterExpr(ctx *parser.FilterExprContext) any { f := new(netmap.Filter) op := operationFromString(ctx.GetOp().GetText()) - f.SetOp(op) + f.Op = op - f1 := *ctx.GetF1().Accept(p).(*netmap.Filter) - f2 := *ctx.GetF2().Accept(p).(*netmap.Filter) + f1 := ctx.GetF1().Accept(p).(*netmap.Filter) + f2 := ctx.GetF2().Accept(p).(*netmap.Filter) // Consider f1=(.. AND ..) AND f2. This can be merged because our AND operation // is of arbitrary arity. ANTLR generates left-associative parse-tree by default. 
if f1.GetOp() == op { - f.SetFilters(append(f1.GetFilters(), f2)) + f.Filters = append(f1.GetFilters(), f2) return f } - f.SetFilters([]netmap.Filter{f1, f2}) + f.Filters = []*netmap.Filter{f1, f2} return f } @@ -984,7 +1095,7 @@ func (p *policyVisitor) VisitFilterValue(ctx *parser.FilterValueContext) any { func (p *policyVisitor) VisitExpr(ctx *parser.ExprContext) any { f := new(netmap.Filter) if flt := ctx.GetFilter(); flt != nil { - f.SetName(flt.GetText()) + f.Name = flt.GetText() return f } @@ -992,9 +1103,9 @@ func (p *policyVisitor) VisitExpr(ctx *parser.ExprContext) any { opStr := ctx.SIMPLE_OP().GetText() value := ctx.GetValue().Accept(p) - f.SetKey(key.(string)) - f.SetOp(operationFromString(opStr)) - f.SetValue(value.(string)) + f.Key = key.(string) + f.Op = operationFromString(opStr) + f.Value = value.(string) return f } @@ -1005,42 +1116,44 @@ func validatePolicy(p PlacementPolicy) error { seenFilters := map[string]bool{} for i := range p.filters { - seenFilters[p.filters[i].GetName()] = true + seenFilters[p.filters[i].name] = true } seenSelectors := map[string]bool{} for i := range p.selectors { - if flt := p.selectors[i].GetFilter(); flt != mainFilterName && !seenFilters[flt] { - return fmt.Errorf("%w: '%s'", errUnknownFilter, flt) + if p.selectors[i].filter != mainFilterName && !seenFilters[p.selectors[i].filter] { + return fmt.Errorf("%w: '%s'", errUnknownFilter, p.selectors[i].filter) } - seenSelectors[p.selectors[i].GetName()] = true + seenSelectors[p.selectors[i].name] = true } for i := range p.replicas { - if sel := p.replicas[i].GetSelector(); sel != "" && !seenSelectors[sel] { - return fmt.Errorf("%w: '%s'", errUnknownSelector, sel) + if p.replicas[i].selector != "" && !seenSelectors[p.replicas[i].selector] { + return fmt.Errorf("%w: '%s'", errUnknownSelector, p.replicas[i].selector) } } return nil } -func clauseFromString(s string) (c netmap.Clause) { - if !c.FromString(strings.ToUpper(s)) { +func clauseFromString(s string) netmap.Clause { + 
v, ok := netmap.Clause_value[strings.ToUpper(s)] + if !ok { // Such errors should be handled by ANTLR code thus this panic. - panic(fmt.Errorf("BUG: invalid clause: %s", c)) + panic(fmt.Errorf("BUG: invalid clause: %s", s)) } - return + return netmap.Clause(v) } -func operationFromString(s string) (op netmap.Operation) { - if !op.FromString(strings.ToUpper(s)) { +func operationFromString(s string) netmap.Operation { + v, ok := netmap.Operation_value[strings.ToUpper(s)] + if !ok { // Such errors should be handled by ANTLR code thus this panic. - panic(fmt.Errorf("BUG: invalid operation: %s", op)) + panic(fmt.Errorf("BUG: invalid operation: %s", s)) } - return + return netmap.Operation(v) } diff --git a/netmap/policy_internal_test.go b/netmap/policy_internal_test.go index 9c48fad9a..97aa622ad 100644 --- a/netmap/policy_internal_test.go +++ b/netmap/policy_internal_test.go @@ -4,7 +4,6 @@ import ( "bytes" "testing" - "github.com/nspcc-dev/neofs-api-go/v2/netmap" "github.com/stretchr/testify/require" ) @@ -37,15 +36,15 @@ func TestPlacementPolicy_CopyTo(t *testing.T) { var dst PlacementPolicy pp.CopyTo(&dst) - var f2 netmap.Filter + var f2 Filter f2.SetName("filter2") - require.Equal(t, pp.filters[0].GetName(), dst.filters[0].GetName()) + require.Equal(t, pp.filters[0].Name(), dst.filters[0].Name()) dst.filters[0].SetName("f2") - require.NotEqual(t, pp.filters[0].GetName(), dst.filters[0].GetName()) + require.NotEqual(t, pp.filters[0].Name(), dst.filters[0].Name()) dst.filters[0] = f2 - require.NotEqual(t, pp.filters[0].GetName(), dst.filters[0].GetName()) + require.NotEqual(t, pp.filters[0].Name(), dst.filters[0].Name()) }) t.Run("internal filters", func(t *testing.T) { @@ -54,7 +53,7 @@ func TestPlacementPolicy_CopyTo(t *testing.T) { var topFilter Filter topFilter.SetName("topFilter") - topFilter.setInnerFilters(netmap.EQ, []Filter{includedFilter}) + topFilter.setInnerFilters(FilterOpEQ, []Filter{includedFilter}) var policy PlacementPolicy 
policy.SetFilters([]Filter{topFilter}) @@ -64,13 +63,13 @@ func TestPlacementPolicy_CopyTo(t *testing.T) { require.True(t, bytes.Equal(policy.Marshal(), dst.Marshal())) t.Run("change extra filter", func(t *testing.T) { - require.Equal(t, topFilter.m.GetName(), dst.filters[0].GetName()) - require.Equal(t, topFilter.m.GetFilters()[0].GetName(), dst.filters[0].GetFilters()[0].GetName()) + require.Equal(t, topFilter.Name(), dst.filters[0].Name()) + require.Equal(t, topFilter.SubFilters()[0].Name(), dst.filters[0].SubFilters()[0].Name()) - dst.filters[0].GetFilters()[0].SetName("someInternalFilterName") + dst.filters[0].SubFilters()[0].SetName("someInternalFilterName") - require.Equal(t, topFilter.m.GetName(), dst.filters[0].GetName()) - require.NotEqual(t, topFilter.m.GetFilters()[0].GetName(), dst.filters[0].GetFilters()[0].GetName()) + require.Equal(t, topFilter.Name(), dst.filters[0].Name()) + require.NotEqual(t, topFilter.SubFilters()[0].Name(), dst.filters[0].SubFilters()[0].Name()) }) }) @@ -88,23 +87,23 @@ func TestPlacementPolicy_CopyTo(t *testing.T) { var dst PlacementPolicy pp.CopyTo(&dst) - require.Equal(t, pp.selectors[0].GetName(), dst.selectors[0].GetName()) + require.Equal(t, pp.selectors[0].Name(), dst.selectors[0].Name()) dst.selectors[0].SetName("s2") - require.NotEqual(t, pp.selectors[0].GetName(), dst.selectors[0].GetName()) + require.NotEqual(t, pp.selectors[0].Name(), dst.selectors[0].Name()) - var s2 netmap.Selector + var s2 Selector s2.SetName("selector2") dst.selectors[0] = s2 - require.NotEqual(t, pp.selectors[0].GetName(), dst.selectors[0].GetName()) + require.NotEqual(t, pp.selectors[0].Name(), dst.selectors[0].Name()) }) t.Run("change replica", func(t *testing.T) { var dst PlacementPolicy pp.CopyTo(&dst) - require.Equal(t, pp.replicas[0].GetSelector(), dst.replicas[0].GetSelector()) - dst.replicas[0].SetSelector("s2") - require.NotEqual(t, pp.replicas[0].GetSelector(), dst.replicas[0].GetSelector()) + require.Equal(t, 
pp.replicas[0].SelectorName(), dst.replicas[0].SelectorName()) + dst.replicas[0].SetSelectorName("s2") + require.NotEqual(t, pp.replicas[0].SelectorName(), dst.replicas[0].SelectorName()) }) } diff --git a/netmap/policy_test.go b/netmap/policy_test.go index edab0b458..befb5ef3e 100644 --- a/netmap/policy_test.go +++ b/netmap/policy_test.go @@ -1,16 +1,17 @@ package netmap_test import ( + "strconv" "strings" "testing" - netmapv2 "github.com/nspcc-dev/neofs-api-go/v2/netmap" + apinetmap "github.com/nspcc-dev/neofs-sdk-go/api/netmap" "github.com/nspcc-dev/neofs-sdk-go/netmap" netmaptest "github.com/nspcc-dev/neofs-sdk-go/netmap/test" "github.com/stretchr/testify/require" ) -func TestEncode(t *testing.T) { +func TestPlacementPolicy_DecodeString(t *testing.T) { testCases := []string{ `REP 1 IN X CBF 1 @@ -46,24 +47,38 @@ FILTER City EQ SPB AND SSD EQ true OR City EQ SPB AND Rating GE 5 AS SPBSSD`, } } -func TestPlacementPolicyEncoding(t *testing.T) { - v := netmaptest.PlacementPolicy() +func TestPlacementPolicy_ReadFromV2(t *testing.T) { + t.Run("missing fields", func(t *testing.T) { + t.Run("replicas", func(t *testing.T) { + n := netmaptest.PlacementPolicy() + var m apinetmap.PlacementPolicy - t.Run("binary", func(t *testing.T) { - var v2 netmap.PlacementPolicy - require.NoError(t, v2.Unmarshal(v.Marshal())) + n.WriteToV2(&m) + m.Replicas = nil + require.ErrorContains(t, n.ReadFromV2(&m), "missing replicas") - require.Equal(t, v, v2) + n.WriteToV2(&m) + m.Replicas = []*apinetmap.Replica{} + require.ErrorContains(t, n.ReadFromV2(&m), "missing replicas") + }) }) +} - t.Run("json", func(t *testing.T) { - data, err := v.MarshalJSON() - require.NoError(t, err) - - var v2 netmap.PlacementPolicy - require.NoError(t, v2.UnmarshalJSON(data)) +func TestPlacementPolicy_Marshal(t *testing.T) { + t.Run("invalid binary", func(t *testing.T) { + var n netmap.PlacementPolicy + msg := []byte("definitely_not_protobuf") + err := n.Unmarshal(msg) + require.ErrorContains(t, err, "decode 
protobuf") + }) +} - require.Equal(t, v, v2) +func TestPlacementPolicy_UnmarshalJSON(t *testing.T) { + t.Run("invalid json", func(t *testing.T) { + var n netmap.PlacementPolicy + msg := []byte("definitely_not_protojson") + err := n.UnmarshalJSON(msg) + require.ErrorContains(t, err, "decode protojson") }) } @@ -71,291 +86,312 @@ func TestPlacementPolicy_ContainerBackupFactor(t *testing.T) { var p netmap.PlacementPolicy require.Zero(t, p.ContainerBackupFactor()) - p = netmaptest.PlacementPolicy() - p.SetContainerBackupFactor(42) - require.EqualValues(t, 42, p.ContainerBackupFactor()) + const val = 42 + p.SetContainerBackupFactor(val) + require.EqualValues(t, val, p.ContainerBackupFactor()) - var m netmapv2.PlacementPolicy - p.WriteToV2(&m) - require.EqualValues(t, 42, m.GetContainerBackupFactor()) + const otherVal = 13 + p.SetContainerBackupFactor(otherVal) + require.EqualValues(t, otherVal, p.ContainerBackupFactor()) - m.SetContainerBackupFactor(13) - err := p.ReadFromV2(m) - require.NoError(t, err) - require.EqualValues(t, 13, m.GetContainerBackupFactor()) -} + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst netmap.PlacementPolicy -func TestPlacementPolicy_Replicas(t *testing.T) { - var p netmap.PlacementPolicy - require.Empty(t, p.Replicas()) + dst.SetContainerBackupFactor(otherVal) - var r netmap.ReplicaDescriptor - var rs []netmap.ReplicaDescriptor + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.Zero(t, dst.ContainerBackupFactor()) - r.SetSelectorName("selector_1") - r.SetNumberOfObjects(1) - rs = append(rs, r) + src.SetContainerBackupFactor(val) - r.SetSelectorName("selector_2") - r.SetNumberOfObjects(2) - rs = append(rs, r) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.EqualValues(t, val, dst.ContainerBackupFactor()) + }) + t.Run("api", func(t *testing.T) { + var src, dst netmap.PlacementPolicy + var msg apinetmap.PlacementPolicy - p.SetReplicas(rs) - 
require.Equal(t, rs, p.Replicas()) + // set required data just to satisfy decoder + src.SetReplicas(netmaptest.NReplicas(2)) - var m netmapv2.PlacementPolicy - p.WriteToV2(&m) - rsm := m.GetReplicas() - require.Len(t, rsm, 2) - rm := rsm[0] - require.Equal(t, "selector_1", rm.GetSelector()) - require.EqualValues(t, 1, rm.GetCount()) - rm = rsm[1] - require.Equal(t, "selector_2", rm.GetSelector()) - require.EqualValues(t, 2, rm.GetCount()) - - err := p.ReadFromV2(m) - require.NoError(t, err) - - rs = p.Replicas() - r = rs[0] - require.Equal(t, "selector_1", r.SelectorName()) - require.EqualValues(t, 1, r.NumberOfObjects()) - r = rs[1] - require.Equal(t, "selector_2", r.SelectorName()) - require.EqualValues(t, 2, r.NumberOfObjects()) -} + dst.SetContainerBackupFactor(otherVal) -func TestPlacementPolicy_Selectors(t *testing.T) { - var p netmap.PlacementPolicy - require.Empty(t, p.Selectors()) + src.WriteToV2(&msg) + require.Zero(t, msg.ContainerBackupFactor) + require.NoError(t, dst.ReadFromV2(&msg)) + require.Zero(t, dst.ContainerBackupFactor()) - var s netmap.Selector - var ss []netmap.Selector + src.SetContainerBackupFactor(val) - s.SetName("name_1") - s.SelectByBucketAttribute("bucket_1") - s.SetFilterName("filter_1") - s.SetNumberOfNodes(1) - s.SelectSame() - ss = append(ss, s) + src.WriteToV2(&msg) + require.EqualValues(t, val, msg.ContainerBackupFactor) + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + require.EqualValues(t, val, dst.ContainerBackupFactor()) + }) + t.Run("json", func(t *testing.T) { + var src, dst netmap.PlacementPolicy - s.SetName("name_2") - s.SelectByBucketAttribute("bucket_2") - s.SetFilterName("filter_2") - s.SetNumberOfNodes(2) - s.SelectDistinct() - ss = append(ss, s) + dst.SetContainerBackupFactor(otherVal) - p.SetSelectors(ss) - require.Equal(t, ss, p.Selectors()) + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.Zero(t, dst.ContainerBackupFactor()) + + 
src.SetContainerBackupFactor(val) - var m netmapv2.PlacementPolicy - p.WriteToV2(&m) - ssm := m.GetSelectors() - require.Len(t, ssm, 2) - sm := ssm[0] - require.Equal(t, "name_1", sm.GetName()) - require.Equal(t, "bucket_1", sm.GetAttribute()) - require.Equal(t, "filter_1", sm.GetFilter()) - require.EqualValues(t, 1, sm.GetCount()) - require.Equal(t, netmapv2.Same, sm.GetClause()) - sm = ssm[1] - require.Equal(t, "name_2", sm.GetName()) - require.Equal(t, "bucket_2", sm.GetAttribute()) - require.Equal(t, "filter_2", sm.GetFilter()) - require.EqualValues(t, 2, sm.GetCount()) - require.Equal(t, netmapv2.Distinct, sm.GetClause()) - - m.SetReplicas([]netmapv2.Replica{{}}) // required - err := p.ReadFromV2(m) - require.NoError(t, err) - - ss = p.Selectors() - s = ss[0] - require.Equal(t, "name_1", s.Name()) - require.Equal(t, "bucket_1", s.BucketAttribute()) - require.Equal(t, "filter_1", s.FilterName()) - require.EqualValues(t, 1, s.NumberOfNodes()) - require.True(t, s.IsSame()) - s = ss[1] - require.Equal(t, "name_2", s.Name()) - require.Equal(t, "bucket_2", s.BucketAttribute()) - require.Equal(t, "filter_2", s.FilterName()) - require.EqualValues(t, 2, s.NumberOfNodes()) - require.True(t, s.IsDistinct()) + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.EqualValues(t, val, dst.ContainerBackupFactor()) + }) + }) } -func TestPlacementPolicy_Filters(t *testing.T) { +func TestPlacementPolicy_Replicas(t *testing.T) { var p netmap.PlacementPolicy - require.Empty(t, p.Filters()) - - var f netmap.Filter - var fs []netmap.Filter - // 'key_1' == 'val_1' - f.SetName("filter_1") - f.Equal("key_1", "val_1") - fs = append(fs, f) + require.Zero(t, p.Replicas()) + require.Zero(t, p.NumberOfReplicas()) - // 'key_2' != 'val_2' - f.SetName("filter_2") - f.NotEqual("key_2", "val_2") - fs = append(fs, f) - - // 'key_3_1' > 31 || 'key_3_2' >= 32 - var sub1 netmap.Filter - sub1.SetName("filter_3_1") - 
sub1.NumericGT("key_3_1", 3_1) + rs := netmaptest.NReplicas(3) + p.SetReplicas(rs) + require.Equal(t, rs, p.Replicas()) + require.EqualValues(t, 3, p.NumberOfReplicas()) + for i := range rs { + require.Equal(t, rs[i].NumberOfObjects(), p.ReplicaNumberByIndex(i)) + } + require.Panics(t, func() { p.ReplicaNumberByIndex(len(rs)) }) + + rsOther := netmaptest.NReplicas(2) + p.SetReplicas(rsOther) + require.Equal(t, rsOther, p.Replicas()) + + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst netmap.PlacementPolicy + + src.SetReplicas(rs) + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.Equal(t, rs, dst.Replicas()) + }) + t.Run("api", func(t *testing.T) { + var src, dst netmap.PlacementPolicy + var msg apinetmap.PlacementPolicy + + src.WriteToV2(&msg) + require.Zero(t, msg.Replicas) + err := dst.ReadFromV2(&msg) + require.ErrorContains(t, err, "missing replicas") + + rs := make([]netmap.ReplicaDescriptor, 3) + for i := range rs { + rs[i].SetNumberOfObjects(uint32(i + 1)) + rs[i].SetSelectorName("selector_" + strconv.Itoa(i+1)) + } + + src.SetReplicas(rs) + + src.WriteToV2(&msg) + require.Equal(t, []*apinetmap.Replica{ + {Count: 1, Selector: "selector_1"}, + {Count: 2, Selector: "selector_2"}, + {Count: 3, Selector: "selector_3"}, + }, msg.Replicas) + err = dst.ReadFromV2(&msg) + require.NoError(t, err) + require.Equal(t, rs, dst.Replicas()) + }) + t.Run("json", func(t *testing.T) { + var src, dst netmap.PlacementPolicy + + src.SetReplicas(rs) + j, err := src.MarshalJSON() + require.NoError(t, err) + + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.Equal(t, rs, dst.Replicas()) + }) + }) +} - var sub2 netmap.Filter - sub2.SetName("filter_3_2") - sub2.NumericGE("key_3_2", 3_2) +func TestPlacementPolicy_Filters(t *testing.T) { + var p netmap.PlacementPolicy - f.SetName("filter_3") - f.LogicalOR(sub1, sub2) - fs = append(fs, f) + require.Zero(t, p.Filters()) - // 'key_4_1' < 41 || 'key_4_2' <= 
42 - sub1.SetName("filter_4_1") - sub1.NumericLT("key_4_1", 4_1) + fs := netmaptest.NFilters(3) + p.SetFilters(fs) + require.Equal(t, fs, p.Filters()) - sub2.SetName("filter_4_2") - sub2.NumericLE("key_4_2", 4_2) + fsOther := netmaptest.NFilters(2) + p.SetFilters(fsOther) + require.Equal(t, fsOther, p.Filters()) + + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst netmap.PlacementPolicy + + dst.SetFilters(fsOther) + src.SetFilters(fs) + + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.Equal(t, fs, dst.Filters()) + }) + t.Run("api", func(t *testing.T) { + var src, dst netmap.PlacementPolicy + var msg apinetmap.PlacementPolicy + + // set required data just to satisfy decoder + src.SetReplicas(netmaptest.NReplicas(3)) + + dst.SetFilters(fs) + + src.WriteToV2(&msg) + require.Zero(t, msg.Filters) + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + require.Zero(t, dst.Filters()) + + fs := make([]netmap.Filter, 8) + for i := range fs { + fs[i].SetName("name" + strconv.Itoa(i)) + } + fs[0].Equal("key0", "val0") + fs[1].NotEqual("key1", "val1") + fs[2].NumericGT("key2", 2) + fs[3].NumericGE("key3", 3) + fs[4].NumericLT("key4", 4) + fs[5].NumericLE("key5", 5) + subs0 := make([]netmap.Filter, 2) + subs0[0].SetName("sub0_0") + subs0[0].Equal("key0_0", "val0_0") + subs0[1].SetName("sub0_1") + subs0[1].NotEqual("key0_1", "val0_1") + fs[6].LogicalOR(subs0...) + subs1 := make([]netmap.Filter, 2) + subs1[0].SetName("sub1_0") + subs1[0].NumericGT("key1_0", 6) + subs1[1].SetName("sub1_1") + subs1[1].NumericGE("key1_1", 7) + fs[7].LogicalAND(subs1...) 
+ + src.SetFilters(fs) + + src.WriteToV2(&msg) + require.Equal(t, []*apinetmap.Filter{ + {Name: "name0", Key: "key0", Op: apinetmap.Operation_EQ, Value: "val0"}, + {Name: "name1", Key: "key1", Op: apinetmap.Operation_NE, Value: "val1"}, + {Name: "name2", Key: "key2", Op: apinetmap.Operation_GT, Value: "2"}, + {Name: "name3", Key: "key3", Op: apinetmap.Operation_GE, Value: "3"}, + {Name: "name4", Key: "key4", Op: apinetmap.Operation_LT, Value: "4"}, + {Name: "name5", Key: "key5", Op: apinetmap.Operation_LE, Value: "5"}, + {Name: "name6", Key: "", Op: apinetmap.Operation_OR, Value: "", Filters: []*apinetmap.Filter{ + {Name: "sub0_0", Key: "key0_0", Op: apinetmap.Operation_EQ, Value: "val0_0"}, + {Name: "sub0_1", Key: "key0_1", Op: apinetmap.Operation_NE, Value: "val0_1"}, + }}, + {Name: "name7", Key: "", Op: apinetmap.Operation_AND, Value: "", Filters: []*apinetmap.Filter{ + {Name: "sub1_0", Key: "key1_0", Op: apinetmap.Operation_GT, Value: "6"}, + {Name: "sub1_1", Key: "key1_1", Op: apinetmap.Operation_GE, Value: "7"}, + }}, + }, msg.Filters) + err = dst.ReadFromV2(&msg) + require.NoError(t, err) + require.Equal(t, fs, dst.Filters()) + }) + t.Run("json", func(t *testing.T) { + var src, dst netmap.PlacementPolicy + + src.SetFilters(fs) + j, err := src.MarshalJSON() + require.NoError(t, err) + + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.Equal(t, fs, dst.Filters()) + }) + }) +} +func TestPlacementPolicy_Selectors(t *testing.T) { + var p netmap.PlacementPolicy - f = netmap.Filter{} - f.SetName("filter_4") - f.LogicalAND(sub1, sub2) - fs = append(fs, f) + require.Zero(t, p.Selectors()) - p.SetFilters(fs) - require.Equal(t, fs, p.Filters()) + ss := netmaptest.NSelectors(3) + p.SetSelectors(ss) + require.Equal(t, ss, p.Selectors()) - var m netmapv2.PlacementPolicy - p.WriteToV2(&m) - fsm := m.GetFilters() - require.Len(t, fsm, 4) - // 1 - fm := fsm[0] - require.Equal(t, "filter_1", fm.GetName()) - require.Equal(t, "key_1", fm.GetKey()) - 
require.Equal(t, netmapv2.EQ, fm.GetOp()) - require.Equal(t, "val_1", fm.GetValue()) - require.Zero(t, fm.GetFilters()) - // 2 - fm = fsm[1] - require.Equal(t, "filter_2", fm.GetName()) - require.Equal(t, "key_2", fm.GetKey()) - require.Equal(t, netmapv2.NE, fm.GetOp()) - require.Equal(t, "val_2", fm.GetValue()) - require.Zero(t, fm.GetFilters()) - // 3 - fm = fsm[2] - require.Equal(t, "filter_3", fm.GetName()) - require.Zero(t, fm.GetKey()) - require.Equal(t, netmapv2.OR, fm.GetOp()) - require.Zero(t, fm.GetValue()) - // 3.1 - subm := fm.GetFilters() - require.Len(t, subm, 2) - fm = subm[0] - require.Equal(t, "filter_3_1", fm.GetName()) - require.Equal(t, "key_3_1", fm.GetKey()) - require.Equal(t, netmapv2.GT, fm.GetOp()) - require.Equal(t, "31", fm.GetValue()) - require.Zero(t, fm.GetFilters()) - // 3.2 - fm = subm[1] - require.Equal(t, "filter_3_2", fm.GetName()) - require.Equal(t, "key_3_2", fm.GetKey()) - require.Equal(t, netmapv2.GE, fm.GetOp()) - require.Equal(t, "32", fm.GetValue()) - require.Zero(t, fm.GetFilters()) - // 4 - fm = fsm[3] - require.Equal(t, "filter_4", fm.GetName()) - require.Zero(t, fm.GetKey()) - require.Equal(t, netmapv2.AND, fm.GetOp()) - require.Zero(t, fm.GetValue()) - // 4.1 - subm = fm.GetFilters() - require.Len(t, subm, 2) - fm = subm[0] - require.Equal(t, "filter_4_1", fm.GetName()) - require.Equal(t, "key_4_1", fm.GetKey()) - require.Equal(t, netmapv2.LT, fm.GetOp()) - require.Equal(t, "41", fm.GetValue()) - require.Zero(t, fm.GetFilters()) - // 4.2 - fm = subm[1] - require.Equal(t, "filter_4_2", fm.GetName()) - require.Equal(t, "key_4_2", fm.GetKey()) - require.Equal(t, netmapv2.LE, fm.GetOp()) - require.Equal(t, "42", fm.GetValue()) - require.Zero(t, fm.GetFilters()) - - m.SetReplicas([]netmapv2.Replica{{}}) // required - err := p.ReadFromV2(m) - require.NoError(t, err) - - fs = p.Filters() - require.Len(t, fs, 4) - // 1 - f = fs[0] - require.Equal(t, "filter_1", f.Name()) - require.Equal(t, "key_1", f.Key()) - require.Equal(t, 
netmap.FilterOpEQ, f.Op()) - require.Equal(t, "val_1", f.Value()) - require.Zero(t, f.SubFilters()) - // 2 - f = fs[1] - require.Equal(t, "filter_2", f.Name()) - require.Equal(t, "key_2", f.Key()) - require.Equal(t, netmap.FilterOpNE, f.Op()) - require.Equal(t, "val_2", f.Value()) - require.Zero(t, f.SubFilters()) - // 3 - f = fs[2] - require.Equal(t, "filter_3", f.Name()) - require.Zero(t, f.Key()) - require.Equal(t, netmap.FilterOpOR, f.Op()) - require.Zero(t, f.Value()) - // 3.1 - sub := f.SubFilters() - require.Len(t, sub, 2) - f = sub[0] - require.Equal(t, "filter_3_1", f.Name()) - require.Equal(t, "key_3_1", f.Key()) - require.Equal(t, netmap.FilterOpGT, f.Op()) - require.Equal(t, "31", f.Value()) - require.Zero(t, f.SubFilters()) - // 3.2 - f = sub[1] - require.Equal(t, "filter_3_2", f.Name()) - require.Equal(t, "key_3_2", f.Key()) - require.Equal(t, netmap.FilterOpGE, f.Op()) - require.Equal(t, "32", f.Value()) - require.Zero(t, f.SubFilters()) - // 4 - f = fs[3] - require.Equal(t, "filter_4", f.Name()) - require.Zero(t, f.Key()) - require.Equal(t, netmap.FilterOpAND, f.Op()) - require.Zero(t, f.Value()) - // 4.1 - sub = f.SubFilters() - require.Len(t, sub, 2) - f = sub[0] - require.Equal(t, "filter_4_1", f.Name()) - require.Equal(t, "key_4_1", f.Key()) - require.Equal(t, netmap.FilterOpLT, f.Op()) - require.Equal(t, "41", f.Value()) - require.Zero(t, f.SubFilters()) - // 4.2 - f = sub[1] - require.Equal(t, "filter_4_2", f.Name()) - require.Equal(t, "key_4_2", f.Key()) - require.Equal(t, netmap.FilterOpLE, f.Op()) - require.Equal(t, "42", f.Value()) - require.Zero(t, f.SubFilters()) + ssOther := netmaptest.NSelectors(2) + p.SetSelectors(ssOther) + require.Equal(t, ssOther, p.Selectors()) + + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst netmap.PlacementPolicy + + dst.SetSelectors(ssOther) + src.SetSelectors(ss) + + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.Equal(t, ss, 
dst.Selectors()) + }) + t.Run("api", func(t *testing.T) { + var src, dst netmap.PlacementPolicy + var msg apinetmap.PlacementPolicy + + // set required data just to satisfy decoder + src.SetReplicas(netmaptest.NReplicas(3)) + + dst.SetSelectors(ss) + + src.WriteToV2(&msg) + require.Zero(t, msg.Selectors) + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + require.Zero(t, dst.Selectors()) + + ss := make([]netmap.Selector, 2) + for i := range ss { + si := strconv.Itoa(i + 1) + ss[i].SetName("name" + si) + ss[i].SetFilterName("filter" + si) + ss[i].SelectByBucketAttribute("bucket" + si) + ss[i].SetNumberOfNodes(uint32(i + 1)) + } + ss[0].SelectSame() + ss[1].SelectDistinct() + + src.SetSelectors(ss) + + src.WriteToV2(&msg) + require.Equal(t, []*apinetmap.Selector{ + {Name: "name1", Count: 1, Clause: apinetmap.Clause_SAME, Attribute: "bucket1", Filter: "filter1"}, + {Name: "name2", Count: 2, Clause: apinetmap.Clause_DISTINCT, Attribute: "bucket2", Filter: "filter2"}, + }, msg.Selectors) + err = dst.ReadFromV2(&msg) + require.NoError(t, err) + require.Equal(t, ss, dst.Selectors()) + }) + t.Run("json", func(t *testing.T) { + var src, dst netmap.PlacementPolicy + + src.SetSelectors(ss) + j, err := src.MarshalJSON() + require.NoError(t, err) + + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.Equal(t, ss, dst.Selectors()) + }) + }) } diff --git a/netmap/selector.go b/netmap/selector.go index 1fb2c8b16..6c9c30091 100644 --- a/netmap/selector.go +++ b/netmap/selector.go @@ -5,21 +5,20 @@ import ( "sort" "github.com/nspcc-dev/hrw/v2" - "github.com/nspcc-dev/neofs-api-go/v2/netmap" ) // processSelectors processes selectors and returns error is any of them is invalid. 
func (c *context) processSelectors(p PlacementPolicy) error { for i := range p.selectors { - fName := p.selectors[i].GetFilter() + fName := p.selectors[i].FilterName() if fName != mainFilterName { - _, ok := c.processedFilters[p.selectors[i].GetFilter()] + _, ok := c.processedFilters[fName] if !ok { return fmt.Errorf("%w: SELECT FROM '%s'", errFilterNotFound, fName) } } - sName := p.selectors[i].GetName() + sName := p.selectors[i].Name() c.processedSelectors[sName] = &p.selectors[i] @@ -36,12 +35,12 @@ func (c *context) processSelectors(p PlacementPolicy) error { // calcNodesCount returns number of buckets and minimum number of nodes in every bucket // for the given selector. -func calcNodesCount(s netmap.Selector) (int, int) { - switch s.GetClause() { - case netmap.Same: - return 1, int(s.GetCount()) +func calcNodesCount(s Selector) (int, int) { + switch { + case s.IsSame(): + return 1, int(s.NumberOfNodes()) default: - return int(s.GetCount()), 1 + return int(s.NumberOfNodes()), 1 } } @@ -56,12 +55,12 @@ func calcBucketWeight(ns nodes, a aggregator, wf weightFunc) float64 { // getSelection returns nodes grouped by s.attribute. // Last argument specifies if more buckets can be used to fulfill CBF. -func (c *context) getSelection(_ PlacementPolicy, s netmap.Selector) ([]nodes, error) { +func (c *context) getSelection(_ PlacementPolicy, s Selector) ([]nodes, error) { bucketCount, nodesInBucket := calcNodesCount(s) buckets := c.getSelectionBase(s) if len(buckets) < bucketCount { - return nil, fmt.Errorf("%w: '%s'", errNotEnoughNodes, s.GetName()) + return nil, fmt.Errorf("%w: '%s'", errNotEnoughNodes, s.Name()) } // We need deterministic output in case there is no pivot. @@ -69,7 +68,7 @@ func (c *context) getSelection(_ PlacementPolicy, s netmap.Selector) ([]nodes, e // However, because initial order influences HRW order for buckets with equal weights, // we also need to have deterministic input to HRW sorting routine. 
if len(c.hrwSeed) == 0 { - if s.GetAttribute() == "" { + if s.BucketAttribute() == "" { sort.Slice(buckets, func(i, j int) bool { return less(buckets[i].nodes[0], buckets[j].nodes[0]) }) @@ -97,7 +96,7 @@ func (c *context) getSelection(_ PlacementPolicy, s netmap.Selector) ([]nodes, e // Fallback to using minimum allowed backup factor (1). res = append(res, fallback...) if len(res) < bucketCount { - return nil, fmt.Errorf("%w: '%s'", errNotEnoughNodes, s.GetName()) + return nil, fmt.Errorf("%w: '%s'", errNotEnoughNodes, s.Name()) } } @@ -110,7 +109,7 @@ func (c *context) getSelection(_ PlacementPolicy, s netmap.Selector) ([]nodes, e hrw.SortWeighted(res, weights, c.hrwSeedHash) } - if s.GetAttribute() == "" { + if s.BucketAttribute() == "" { res, fallback = res[:bucketCount], res[bucketCount:] for i := range fallback { index := i % bucketCount @@ -131,13 +130,13 @@ type nodeAttrPair struct { // getSelectionBase returns nodes grouped by selector attribute. // It it guaranteed that each pair will contain at least one node. 
-func (c *context) getSelectionBase(s netmap.Selector) []nodeAttrPair { - fName := s.GetFilter() +func (c *context) getSelectionBase(s Selector) []nodeAttrPair { + fName := s.FilterName() f := c.processedFilters[fName] isMain := fName == mainFilterName result := []nodeAttrPair{} nodeMap := map[string][]NodeInfo{} - attr := s.GetAttribute() + attr := s.BucketAttribute() for i := range c.netMap.nodes { if isMain || c.match(f, c.netMap.nodes[i]) { diff --git a/netmap/selector_test.go b/netmap/selector_test.go index 58f32e559..d2c10ac5f 100644 --- a/netmap/selector_test.go +++ b/netmap/selector_test.go @@ -7,7 +7,6 @@ import ( "testing" "github.com/nspcc-dev/hrw/v2" - "github.com/nspcc-dev/neofs-api-go/v2/netmap" cid "github.com/nspcc-dev/neofs-sdk-go/container/id" "github.com/stretchr/testify/require" ) @@ -73,8 +72,8 @@ func BenchmarkPolicyHRWType(b *testing.B) { newSelector("loc1", "Location", 1, "loc1", (*Selector).SelectSame), newSelector("loc2", "Location", 1, "loc2", (*Selector).SelectSame)}, []Filter{ - newFilter("loc1", "Location", "Shanghai", netmap.EQ), - newFilter("loc2", "Location", "Shanghai", netmap.NE), + newFilter("loc1", "Location", "Shanghai", FilterOpEQ), + newFilter("loc2", "Location", "Shanghai", FilterOpNE), }) nodes := make([]NodeInfo, netmapSize) @@ -118,8 +117,8 @@ func TestPlacementPolicy_DeterministicOrder(t *testing.T) { newSelector("loc1", "Location", 1, "loc1", (*Selector).SelectSame), newSelector("loc2", "Location", 1, "loc2", (*Selector).SelectSame)}, []Filter{ - newFilter("loc1", "Location", "Shanghai", netmap.EQ), - newFilter("loc2", "Location", "Shanghai", netmap.NE), + newFilter("loc1", "Location", "Shanghai", FilterOpEQ), + newFilter("loc2", "Location", "Shanghai", FilterOpNE), }) nodeList := make([]NodeInfo, netmapSize) @@ -174,8 +173,8 @@ func TestPlacementPolicy_ProcessSelectors(t *testing.T) { newSelector("Main", "Country", 3, "*", (*Selector).SelectDistinct), }, []Filter{ - newFilter("FromRU", "Country", "Russia", netmap.EQ), 
- newFilter("Good", "Rating", "4", netmap.GE), + newFilter("FromRU", "Country", "Russia", FilterOpEQ), + newFilter("Good", "Rating", "4", FilterOpGE), }) nodes := []NodeInfo{ nodeInfoFromAttributes("Country", "Russia", "Rating", "1", "City", "SPB"), @@ -199,13 +198,13 @@ func TestPlacementPolicy_ProcessSelectors(t *testing.T) { require.NoError(t, c.processSelectors(p)) for _, s := range p.selectors { - sel := c.selections[s.GetName()] - s := c.processedSelectors[s.GetName()] + sel := c.selections[s.Name()] + s := c.processedSelectors[s.Name()] bucketCount, nodesInBucket := calcNodesCount(*s) nodesInBucket *= int(c.cbf) - targ := fmt.Sprintf("selector '%s'", s.GetName()) + targ := fmt.Sprintf("selector '%s'", s.Name()) require.Equal(t, bucketCount, len(sel), targ) - fName := s.GetFilter() + fName := s.FilterName() for _, res := range sel { require.Equal(t, nodesInBucket, len(res), targ) for j := range res { @@ -219,11 +218,9 @@ func TestSelector_SetName(t *testing.T) { const name = "some name" var s Selector - require.Zero(t, s.m.GetName()) require.Zero(t, s.Name()) s.SetName(name) - require.Equal(t, name, s.m.GetName()) require.Equal(t, name, s.Name()) } @@ -231,29 +228,24 @@ func TestSelector_SetNumberOfNodes(t *testing.T) { const num = 3 var s Selector - require.Zero(t, s.m.GetCount()) require.Zero(t, s.NumberOfNodes()) s.SetNumberOfNodes(num) - require.EqualValues(t, num, s.m.GetCount()) require.EqualValues(t, num, s.NumberOfNodes()) } func TestSelectorClauses(t *testing.T) { var s Selector - require.Equal(t, netmap.UnspecifiedClause, s.m.GetClause()) require.False(t, s.IsSame()) require.False(t, s.IsDistinct()) s.SelectDistinct() - require.Equal(t, netmap.Distinct, s.m.GetClause()) require.False(t, s.IsSame()) require.True(t, s.IsDistinct()) s.SelectSame() - require.Equal(t, netmap.Same, s.m.GetClause()) require.True(t, s.IsSame()) require.False(t, s.IsDistinct()) } @@ -262,18 +254,18 @@ func TestSelector_SelectByBucketAttribute(t *testing.T) { const attr = 
"some attribute" var s Selector - require.Zero(t, s.m.GetAttribute()) + require.Zero(t, s.BucketAttribute()) s.SelectByBucketAttribute(attr) - require.Equal(t, attr, s.m.GetAttribute()) + require.Equal(t, attr, s.BucketAttribute()) } func TestSelector_SetFilterName(t *testing.T) { const fName = "some filter" var s Selector - require.Zero(t, s.m.GetFilter()) + require.Zero(t, s.FilterName()) s.SetFilterName(fName) - require.Equal(t, fName, s.m.GetFilter()) + require.Equal(t, fName, s.FilterName()) } diff --git a/netmap/test/generate.go b/netmap/test/generate.go index a0b1fb6fc..46e696d72 100644 --- a/netmap/test/generate.go +++ b/netmap/test/generate.go @@ -2,16 +2,38 @@ package netmaptest import ( "math/rand" + "strconv" "github.com/nspcc-dev/neofs-sdk-go/netmap" ) -func filter(withInner bool) (x netmap.Filter) { - x.SetName("name") - if withInner { - x.LogicalOR(filter(false), filter(false)) - } else { - x.NumericGE("epoch", 13) +func filter(allowComplex bool) (x netmap.Filter) { + x.SetName("filter_" + strconv.Itoa(rand.Int())) + rnd := rand.Int() % 8 + switch rnd { + case 0: + x.Equal("key_"+strconv.Itoa(rand.Int()), "value_"+strconv.Itoa(rand.Int())) + case 1: + x.NotEqual("key_"+strconv.Itoa(rand.Int()), "value_"+strconv.Itoa(rand.Int())) + case 2: + x.NumericGT("key_"+strconv.Itoa(rand.Int()), rand.Int63()) + case 3: + x.NumericGE("key_"+strconv.Itoa(rand.Int()), rand.Int63()) + case 4: + x.NumericLT("key_"+strconv.Itoa(rand.Int()), rand.Int63()) + case 5: + x.NumericLE("key_"+strconv.Itoa(rand.Int()), rand.Int63()) + } + if allowComplex { + fs := make([]netmap.Filter, 1+rand.Int()%3) + for i := range fs { + fs[i] = filter(false) + } + if rnd == 6 { + x.LogicalAND(fs...) + } else { + x.LogicalOR(fs...) + } } return x @@ -22,61 +44,134 @@ func Filter() netmap.Filter { return filter(true) } +// NFilters returns n random netmap.Filter instances. 
+func NFilters(n int) []netmap.Filter { + res := make([]netmap.Filter, n) + for i := range res { + res[i] = Filter() + } + return res +} + // Replica returns random netmap.ReplicaDescriptor. -func Replica() (x netmap.ReplicaDescriptor) { - x.SetNumberOfObjects(666) - x.SetSelectorName("selector") +func Replica() netmap.ReplicaDescriptor { + var x netmap.ReplicaDescriptor + x.SetNumberOfObjects(rand.Uint32()) + x.SetSelectorName("selector_" + strconv.Itoa(rand.Int())) - return + return x +} + +// NReplicas returns n random netmap.ReplicaDescriptor instances. +func NReplicas(n int) []netmap.ReplicaDescriptor { + res := make([]netmap.ReplicaDescriptor, n) + for i := range res { + res[i] = Replica() + } + return res } // Selector returns random netmap.Selector. -func Selector() (x netmap.Selector) { - x.SetNumberOfNodes(11) - x.SetName("name") - x.SetFilterName("filter") - x.SelectByBucketAttribute("attribute") - x.SelectDistinct() - - return +func Selector() netmap.Selector { + var x netmap.Selector + x.SetNumberOfNodes(uint32(rand.Int())) + x.SetName("selector" + strconv.Itoa(rand.Int())) + x.SetFilterName("filter_" + strconv.Itoa(rand.Int())) + x.SelectByBucketAttribute("attribute_" + strconv.Itoa(rand.Int())) + switch rand.Int() % 3 { + case 1: + x.SelectSame() + case 2: + x.SelectDistinct() + } + + return x +} + +// NSelectors returns n random netmap.Selector instances. +func NSelectors(n int) []netmap.Selector { + res := make([]netmap.Selector, n) + for i := range res { + res[i] = Selector() + } + return res } // PlacementPolicy returns random netmap.PlacementPolicy. 
-func PlacementPolicy() (p netmap.PlacementPolicy) { - p.SetContainerBackupFactor(9) - p.SetFilters([]netmap.Filter{Filter(), Filter()}) - p.SetReplicas([]netmap.ReplicaDescriptor{Replica(), Replica()}) - p.SetSelectors([]netmap.Selector{Selector(), Selector()}) +func PlacementPolicy() netmap.PlacementPolicy { + var p netmap.PlacementPolicy + p.SetContainerBackupFactor(uint32(rand.Int())) + p.SetReplicas(NReplicas(1 + rand.Int()%3)) + if n := rand.Int() % 4; n > 0 { + p.SetFilters(NFilters(n)) + } + if n := rand.Int() % 4; n > 0 { + p.SetSelectors(NSelectors(n)) + } - return + return p } // NetworkInfo returns random netmap.NetworkInfo. -func NetworkInfo() (x netmap.NetworkInfo) { - x.SetCurrentEpoch(21) - x.SetMagicNumber(32) - x.SetMsPerBlock(43) - x.SetAuditFee(1) - x.SetStoragePrice(2) - x.SetContainerFee(3) - x.SetEigenTrustAlpha(0.4) - x.SetNumberOfEigenTrustIterations(5) - x.SetEpochDuration(6) - x.SetIRCandidateFee(7) - x.SetMaxObjectSize(8) - x.SetWithdrawalFee(9) - - return +func NetworkInfo() netmap.NetworkInfo { + var x netmap.NetworkInfo + x.SetCurrentEpoch(rand.Uint64()) + x.SetMagicNumber(rand.Uint64()) + x.SetMsPerBlock(rand.Int63()) + x.SetAuditFee(rand.Uint64()) + x.SetStoragePrice(rand.Uint64()) + x.SetContainerFee(rand.Uint64()) + x.SetNamedContainerFee(rand.Uint64()) + x.SetEigenTrustAlpha(rand.Float64()) + x.SetNumberOfEigenTrustIterations(rand.Uint64()) + x.SetEpochDuration(rand.Uint64()) + x.SetIRCandidateFee(rand.Uint64()) + x.SetMaxObjectSize(rand.Uint64()) + x.SetWithdrawalFee(rand.Uint64()) + x.SetHomomorphicHashingDisabled(rand.Int()%2 == 0) + x.SetMaintenanceModeAllowed(rand.Int()%2 == 0) + for i := 0; i < rand.Int()%4; i++ { + val := make([]byte, rand.Int()%64+1) + rand.Read(val) + x.SetRawNetworkParameter("prm_"+strconv.Itoa(rand.Int()), val) + } + + return x } // NodeInfo returns random netmap.NodeInfo. 
-func NodeInfo() (x netmap.NodeInfo) { +func NodeInfo() netmap.NodeInfo { + var x netmap.NodeInfo key := make([]byte, 33) //nolint:staticcheck rand.Read(key) x.SetPublicKey(key) - x.SetNetworkEndpoints("1", "2", "3") - return + endpoints := make([]string, 1+rand.Int()%3) + for i := range endpoints { + endpoints[i] = "endpoint_" + strconv.Itoa(rand.Int()) + } + x.SetNetworkEndpoints(endpoints) + + return x +} + +// NNodes returns n random netmap.NodeInfo instances. +func NNodes(n int) []netmap.NodeInfo { + res := make([]netmap.NodeInfo, n) + for i := range res { + res[i] = NodeInfo() + } + return res +} + +// Netmap returns random netmap.NetMap. +func Netmap() netmap.NetMap { + var x netmap.NetMap + x.SetEpoch(rand.Uint64()) + if n := rand.Int() % 4; n > 0 { + x.SetNodes(NNodes(n)) + } + return x } diff --git a/netmap/test/generate_test.go b/netmap/test/generate_test.go new file mode 100644 index 000000000..c5ff23856 --- /dev/null +++ b/netmap/test/generate_test.go @@ -0,0 +1,46 @@ +package netmaptest_test + +import ( + "testing" + + apinetmap "github.com/nspcc-dev/neofs-sdk-go/api/netmap" + "github.com/nspcc-dev/neofs-sdk-go/netmap" + netmaptest "github.com/nspcc-dev/neofs-sdk-go/netmap/test" + "github.com/stretchr/testify/require" +) + +func TestPlacementPolicy(t *testing.T) { + v := netmaptest.PlacementPolicy() + require.NotEqual(t, v, netmaptest.PlacementPolicy()) + + var v2 netmap.PlacementPolicy + require.NoError(t, v2.Unmarshal(v.Marshal())) + require.Equal(t, v, v2) + + var m apinetmap.PlacementPolicy + v.WriteToV2(&m) + var v3 netmap.PlacementPolicy + require.NoError(t, v3.ReadFromV2(&m)) + require.Equal(t, v, v3) + + j, err := v.MarshalJSON() + require.NoError(t, err) + var v4 netmap.PlacementPolicy + require.NoError(t, v4.UnmarshalJSON(j)) + require.Equal(t, v, v4) +} + +func TestNetworkInfo(t *testing.T) { + v := netmaptest.NetworkInfo() + require.NotEqual(t, v, netmaptest.NetworkInfo()) + + var v2 netmap.NetworkInfo + require.NoError(t, 
v2.Unmarshal(v.Marshal())) + require.Equal(t, v, v2) + + var m apinetmap.NetworkInfo + v.WriteToV2(&m) + var v3 netmap.NetworkInfo + require.NoError(t, v3.ReadFromV2(&m)) + require.Equal(t, v, v3) +} diff --git a/object/attribute.go b/object/attribute.go deleted file mode 100644 index 4b6b5e2e8..000000000 --- a/object/attribute.go +++ /dev/null @@ -1,98 +0,0 @@ -package object - -import ( - "github.com/nspcc-dev/neofs-api-go/v2/object" -) - -// Various system attributes. -const ( - // AttributeExpirationEpoch is a key to an object attribute that determines - // after what epoch the object becomes expired. Objects that do not have this - // attribute never expire. - // - // Reaction of NeoFS system components to the objects' 'expired' property may - // vary. For example, in the basic scenario, expired objects are auto-deleted - // from the storage. Detailed behavior can be found in the NeoFS Specification. - // - // Note that the value determines exactly the last epoch of the object's - // relevance: for example, with the value N, the object is relevant in epoch N - // and expired in any epoch starting from N+1. - AttributeExpirationEpoch = object.SysAttributeExpEpoch -) - -// Attribute represents v2-compatible object attribute. -type Attribute object.Attribute - -// NewAttributeFromV2 wraps v2 [object.Attribute] message to [Attribute]. -// -// Nil [object.Attribute] converts to nil. -func NewAttributeFromV2(aV2 *object.Attribute) *Attribute { - return (*Attribute)(aV2) -} - -// NewAttribute creates and initializes new [Attribute]. -func NewAttribute(key, value string) *Attribute { - attr := new(object.Attribute) - attr.SetKey(key) - attr.SetValue(value) - - return NewAttributeFromV2(attr) -} - -// Key returns key to the object attribute. -func (a *Attribute) Key() string { - return (*object.Attribute)(a).GetKey() -} - -// SetKey sets key to the object attribute. 
-func (a *Attribute) SetKey(v string) { - (*object.Attribute)(a).SetKey(v) -} - -// Value return value of the object attribute. -func (a *Attribute) Value() string { - return (*object.Attribute)(a).GetValue() -} - -// SetValue sets value of the object attribute. -func (a *Attribute) SetValue(v string) { - (*object.Attribute)(a).SetValue(v) -} - -// ToV2 converts [Attribute] to v2 [object.Attribute] message. -// -// Nil [Attribute] converts to nil. -// -// The value returned shares memory with the structure itself, so changing it can lead to data corruption. -// Make a copy if you need to change it. -func (a *Attribute) ToV2() *object.Attribute { - return (*object.Attribute)(a) -} - -// Marshal marshals [Attribute] into a protobuf binary form. -// -// See also [Attribute.Unmarshal]. -func (a *Attribute) Marshal() ([]byte, error) { - return (*object.Attribute)(a).StableMarshal(nil), nil -} - -// Unmarshal unmarshals protobuf binary representation of [Attribute]. -// -// See also [Attribute.Marshal]. -func (a *Attribute) Unmarshal(data []byte) error { - return (*object.Attribute)(a).Unmarshal(data) -} - -// MarshalJSON encodes [Attribute] to protobuf JSON format. -// -// See also [Attribute.UnmarshalJSON]. -func (a *Attribute) MarshalJSON() ([]byte, error) { - return (*object.Attribute)(a).MarshalJSON() -} - -// UnmarshalJSON decodes [Attribute] from protobuf JSON format. -// -// See also [Attribute.MarshalJSON]. 
-func (a *Attribute) UnmarshalJSON(data []byte) error { - return (*object.Attribute)(a).UnmarshalJSON(data) -} diff --git a/object/attribute_test.go b/object/attribute_test.go deleted file mode 100644 index f87bc73f0..000000000 --- a/object/attribute_test.go +++ /dev/null @@ -1,91 +0,0 @@ -package object - -import ( - "testing" - - "github.com/nspcc-dev/neofs-api-go/v2/object" - "github.com/stretchr/testify/require" -) - -func TestAttribute(t *testing.T) { - key, val := "some key", "some value" - - a := NewAttribute(key, val) - - require.Equal(t, key, a.Key()) - require.Equal(t, val, a.Value()) - - aV2 := a.ToV2() - - require.Equal(t, key, aV2.GetKey()) - require.Equal(t, val, aV2.GetValue()) -} - -func TestAttributeEncoding(t *testing.T) { - a := NewAttribute("key", "value") - - t.Run("binary", func(t *testing.T) { - data, err := a.Marshal() - require.NoError(t, err) - - a2 := NewAttribute("", "") - require.NoError(t, a2.Unmarshal(data)) - - require.Equal(t, a, a2) - }) - - t.Run("json", func(t *testing.T) { - data, err := a.MarshalJSON() - require.NoError(t, err) - - a2 := NewAttribute("", "") - require.NoError(t, a2.UnmarshalJSON(data)) - - require.Equal(t, a, a2) - }) -} - -func TestNewAttributeFromV2(t *testing.T) { - t.Run("from nil", func(t *testing.T) { - var x *object.Attribute - - require.Nil(t, NewAttributeFromV2(x)) - }) -} - -func TestAttribute_ToV2(t *testing.T) { - t.Run("nil", func(t *testing.T) { - var x *Attribute - - require.Nil(t, x.ToV2()) - }) -} - -func TestNewAttribute(t *testing.T) { - t.Run("default values", func(t *testing.T) { - a := NewAttribute("", "") - - // check initial values - require.Empty(t, a.Key()) - require.Empty(t, a.Value()) - - // convert to v2 message - aV2 := a.ToV2() - - require.Empty(t, aV2.GetKey()) - require.Empty(t, aV2.GetValue()) - }) - - t.Run("pre installed key and value", func(t *testing.T) { - a := NewAttribute("key", "value") - - require.NotEmpty(t, a.Key()) - require.NotEmpty(t, a.Value()) - - // convert to 
v2 message - aV2 := a.ToV2() - - require.NotEmpty(t, aV2.GetKey()) - require.NotEmpty(t, aV2.GetValue()) - }) -} diff --git a/object/attributes.go b/object/attributes.go new file mode 100644 index 000000000..b26fe96f7 --- /dev/null +++ b/object/attributes.go @@ -0,0 +1,102 @@ +package object + +import ( + "fmt" + "strconv" + "time" +) + +// Various attributes popular in applications. +const ( + attributeName = "Name" + attributeFileName = "FileName" + attributeFilePath = "FilePath" + attributeTimestamp = "Timestamp" + attributeContentType = "Content-Type" +) + +// SetName associates given name with the object by setting its 'Name' +// attribute. The name must not be empty and is expected to be human-readable. +// Use [GetName] to read and [FilterByName] to search. The property is treated +// by the system as a regular user attribute. +func SetName(obj objectOrHeaderPtr, name string) { + obj.SetAttribute(attributeName, name) +} + +// GetName returns object name set according to [SetName]. Zero return means +// unset name. +func GetName(obj objectOrHeader) string { + return obj.Attribute(attributeName) +} + +// SetFileName associates given file name with the object by setting its +// 'FileName' attribute. The file name must not be empty. Use [GetFileName] to +// read and [FilterByFileName] to search. The property is treated by the system as a +// regular user attribute. +func SetFileName(obj objectOrHeaderPtr, file string) { + obj.SetAttribute(attributeFileName, file) +} + +// GetFileName returns associated file name set according to [SetFileName]. Zero +// return means unset file name. +func GetFileName(obj objectOrHeader) string { + return obj.Attribute(attributeFileName) +} + +// SetFilePath associates given filesystem path with the object by settings its +// 'FilePath' attribute. The path must not be empty. Use [GetFilePath] to read +// and [FilterByFilePath] to search. The property is treated by the system as a +// regular user attribute. 
+// +// The file path should start with a '/' and use '/' as a delimiting symbol. +// Trailing '/' should be interpreted as a virtual directory marker. If an +// object has conflicting file path and name (see [SetFileName]), the first one +// should have higher priority because it is used to construct the directory +// tree. The file path with trailing '/' and non-empty name should not be used +// together. Again, these statements are purely advisory and are not verified by +// the system. +func SetFilePath(obj objectOrHeaderPtr, filePath string) { + obj.SetAttribute(attributeFilePath, filePath) +} + +// GetFilePath returns associated file name set according to [SetFilePath]. Zero +// return means unset file name. +func GetFilePath(obj objectOrHeader) string { + return obj.Attribute(attributeFilePath) +} + +// SetCreationTime stamps object's creation time in Unix Timestamp format by +// setting its 'Timestamp' attribute. Use [GetCreationTime] to read and +// [FilterByCreationTime] to search. The property is treated by the system as a +// regular user attribute. +func SetCreationTime(obj objectOrHeaderPtr, t time.Time) { + obj.SetAttribute(attributeTimestamp, strconv.FormatInt(t.Unix(), 10)) +} + +// GetCreationTime returns object's creation time set according to +// [SetCreationTime]. Zero return (in seconds) means unset timestamp. +func GetCreationTime(obj objectOrHeader) time.Time { + var sec int64 + if s := obj.Attribute(attributeTimestamp); s != "" { + var err error + sec, err = strconv.ParseInt(s, 10, 64) + if err != nil { + panic(fmt.Sprintf("parse timestamp attribute: %v", err)) + } + } + return time.Unix(sec, 0) +} + +// SetContentType specifies MIME content type of payload of the object by +// setting its 'Content-Type' attribute. The type must not be empty. Use +// [GetContentType] to read and [FilterByContentType] to search. The property is +// treated by the system as a regular user attribute. 
+func SetContentType(obj objectOrHeaderPtr, contentType string) { + obj.SetAttribute(attributeContentType, contentType) +} + +// GetContentType returns content type of the object payload set according to +// [SetContentType]. Zero return means unset content type. +func GetContentType(obj objectOrHeader) string { + return obj.Attribute(attributeContentType) +} diff --git a/object/error.go b/object/error.go deleted file mode 100644 index 593cb41b1..000000000 --- a/object/error.go +++ /dev/null @@ -1,23 +0,0 @@ -package object - -// SplitInfoError is a special error that means that the original object is a large one (split into a number of smaller objects). -type SplitInfoError struct { - si *SplitInfo -} - -const splitInfoErrorMsg = "object not found, split info has been provided" - -// Error implements the error interface. -func (s *SplitInfoError) Error() string { - return splitInfoErrorMsg -} - -// SplitInfo returns [SplitInfo] data. -func (s *SplitInfoError) SplitInfo() *SplitInfo { - return s.si -} - -// NewSplitInfoError is a constructor for [SplitInfoError]. 
-func NewSplitInfoError(v *SplitInfo) *SplitInfoError { - return &SplitInfoError{si: v} -} diff --git a/object/error_test.go b/object/error_test.go deleted file mode 100644 index 22189a88d..000000000 --- a/object/error_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package object_test - -import ( - "errors" - "testing" - - "github.com/nspcc-dev/neofs-sdk-go/object" - "github.com/stretchr/testify/require" -) - -func TestNewSplitInfoError(t *testing.T) { - var ( - si = generateSplitInfo() - - err error = object.NewSplitInfoError(si) - expectedErr *object.SplitInfoError - ) - - require.True(t, errors.As(err, &expectedErr)) - - siErr, ok := err.(*object.SplitInfoError) - require.True(t, ok) - require.Equal(t, si, siErr.SplitInfo()) -} - -func generateSplitInfo() *object.SplitInfo { - si := object.NewSplitInfo() - si.SetSplitID(object.NewSplitID()) - si.SetLastPart(generateID()) - si.SetLink(generateID()) - - return si -} diff --git a/object/fmt.go b/object/fmt.go deleted file mode 100644 index cccfe8b22..000000000 --- a/object/fmt.go +++ /dev/null @@ -1,194 +0,0 @@ -package object - -import ( - "bytes" - "crypto/sha256" - "errors" - "fmt" - - "github.com/nspcc-dev/neofs-api-go/v2/object" - "github.com/nspcc-dev/neofs-sdk-go/checksum" - neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" - oid "github.com/nspcc-dev/neofs-sdk-go/object/id" -) - -// MaxHeaderLen is a maximum allowed length of binary object header to be -// created via NeoFS API protocol. -const MaxHeaderLen = 16 << 10 - -var ( - errCheckSumMismatch = errors.New("payload checksum mismatch") - errCheckSumNotSet = errors.New("payload checksum is not set") - errIncorrectID = errors.New("incorrect object identifier") -) - -// CalculatePayloadChecksum calculates and returns checksum of -// object payload bytes. 
-func CalculatePayloadChecksum(payload []byte) checksum.Checksum { - var res checksum.Checksum - checksum.Calculate(&res, checksum.SHA256, payload) - - return res -} - -// CalculateAndSetPayloadChecksum calculates checksum of current -// object payload and writes it to the object. -func (o *Object) CalculateAndSetPayloadChecksum() { - o.SetPayloadChecksum( - CalculatePayloadChecksum(o.Payload()), - ) -} - -// VerifyPayloadChecksum checks if payload checksum in the object -// corresponds to its payload. -func (o *Object) VerifyPayloadChecksum() error { - actual := CalculatePayloadChecksum(o.Payload()) - - cs, set := o.PayloadChecksum() - if !set { - return errCheckSumNotSet - } - - if !bytes.Equal(cs.Value(), actual.Value()) { - return errCheckSumMismatch - } - - return nil -} - -// CalculateID calculates identifier for the object. -func (o *Object) CalculateID() (oid.ID, error) { - var id oid.ID - id.SetSHA256(sha256.Sum256(o.ToV2().GetHeader().StableMarshal(nil))) - - return id, nil -} - -// CalculateAndSetID calculates identifier for the object -// and writes the result to it. -func (o *Object) CalculateAndSetID() error { - id, err := o.CalculateID() - if err != nil { - return err - } - - o.SetID(id) - - return nil -} - -// VerifyID checks if identifier in the object corresponds to -// its structure. -func (o *Object) VerifyID() error { - id, err := o.CalculateID() - if err != nil { - return err - } - - oID, set := o.ID() - if !set { - return errOIDNotSet - } - - if !id.Equals(oID) { - return errIncorrectID - } - - return nil -} - -// Sign signs object id with provided key and sets that signature to the object. -// -// See also [oid.ID.CalculateIDSignature]. -func (o *Object) Sign(signer neofscrypto.Signer) error { - oID, set := o.ID() - if !set { - return errOIDNotSet - } - - sig, err := oID.CalculateIDSignature(signer) - if err != nil { - return err - } - - o.SetSignature(&sig) - - return nil -} - -// SignedData returns actual payload to sign. 
-// -// See also [Object.Sign]. -func (o *Object) SignedData() []byte { - oID, _ := o.ID() - bts, _ := oID.Marshal() - - return bts -} - -// VerifySignature verifies object ID signature. -func (o *Object) VerifySignature() bool { - m := (*object.Object)(o) - - sigV2 := m.GetSignature() - if sigV2 == nil { - return false - } - - idV2 := m.GetObjectID() - if idV2 == nil { - return false - } - - var sig neofscrypto.Signature - - return sig.ReadFromV2(*sigV2) == nil && sig.Verify(idV2.StableMarshal(nil)) -} - -// SetIDWithSignature sets object identifier and signature. -func (o *Object) SetIDWithSignature(signer neofscrypto.Signer) error { - if err := o.CalculateAndSetID(); err != nil { - return fmt.Errorf("could not set identifier: %w", err) - } - - if err := o.Sign(signer); err != nil { - return fmt.Errorf("could not set signature: %w", err) - } - - return nil -} - -// SetVerificationFields calculates and sets all verification fields of the object. -func (o *Object) SetVerificationFields(signer neofscrypto.Signer) error { - o.CalculateAndSetPayloadChecksum() - - return o.SetIDWithSignature(signer) -} - -// CheckVerificationFields checks all verification fields of the object. -func (o *Object) CheckVerificationFields() error { - if err := o.CheckHeaderVerificationFields(); err != nil { - return fmt.Errorf("invalid header structure: %w", err) - } - - if err := o.VerifyPayloadChecksum(); err != nil { - return fmt.Errorf("invalid payload checksum: %w", err) - } - - return nil -} - -var errInvalidSignature = errors.New("invalid signature") - -// CheckHeaderVerificationFields checks all verification fields except payload. 
-func (o *Object) CheckHeaderVerificationFields() error { - if !o.VerifySignature() { - return errInvalidSignature - } - - if err := o.VerifyID(); err != nil { - return fmt.Errorf("invalid identifier: %w", err) - } - - return nil -} diff --git a/object/fmt_test.go b/object/fmt_test.go deleted file mode 100644 index 69205020c..000000000 --- a/object/fmt_test.go +++ /dev/null @@ -1,82 +0,0 @@ -package object - -import ( - "crypto/rand" - "testing" - - cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" - "github.com/nspcc-dev/neofs-sdk-go/crypto/test" - usertest "github.com/nspcc-dev/neofs-sdk-go/user/test" - "github.com/stretchr/testify/require" -) - -func TestVerificationFields(t *testing.T) { - obj := New() - - payload := make([]byte, 10) - _, _ = rand.Read(payload) - - obj.SetPayload(payload) - obj.SetPayloadSize(uint64(len(payload))) - - require.NoError(t, obj.SetVerificationFields(test.RandomSigner(t))) - - require.NoError(t, obj.CheckVerificationFields()) - - items := []struct { - corrupt func() - restore func() - }{ - { - corrupt: func() { - payload[0]++ - }, - restore: func() { - payload[0]-- - }, - }, - { - corrupt: func() { - obj.SetPayloadSize(obj.PayloadSize() + 1) - }, - restore: func() { - obj.SetPayloadSize(obj.PayloadSize() - 1) - }, - }, - { - corrupt: func() { - obj.ToV2().GetObjectID().GetValue()[0]++ - }, - restore: func() { - obj.ToV2().GetObjectID().GetValue()[0]-- - }, - }, - } - - for _, item := range items { - item.corrupt() - - require.Error(t, obj.CheckVerificationFields()) - - item.restore() - - require.NoError(t, obj.CheckVerificationFields()) - } -} - -func TestObject_SignedData(t *testing.T) { - signer := test.RandomSigner(t) - uid := usertest.ID(t) - - rf := RequiredFields{ - Container: cidtest.ID(), - Owner: uid, - } - var val Object - - val.InitCreation(rf) - - require.NoError(t, val.SetVerificationFields(signer)) - - test.SignedDataComponent(t, signer, &val) -} diff --git a/object/header.go b/object/header.go new file 
mode 100644 index 000000000..37b0ff3fd --- /dev/null +++ b/object/header.go @@ -0,0 +1,728 @@ +package object + +import ( + "bytes" + "crypto/sha256" + "errors" + "fmt" + "strconv" + + "github.com/nspcc-dev/neofs-sdk-go/api/object" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + apisession "github.com/nspcc-dev/neofs-sdk-go/api/session" + "github.com/nspcc-dev/neofs-sdk-go/checksum" + cid "github.com/nspcc-dev/neofs-sdk-go/container/id" + neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" + oid "github.com/nspcc-dev/neofs-sdk-go/object/id" + "github.com/nspcc-dev/neofs-sdk-go/session" + "github.com/nspcc-dev/neofs-sdk-go/user" + "github.com/nspcc-dev/neofs-sdk-go/version" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" +) + +// MaxHeaderLen is a maximum allowed length of binary object header to be +// created via NeoFS API protocol. See [Header.MarshaledSize]. +const MaxHeaderLen = 16 << 10 + +// Various system attributes. +const ( + sysAttributePrefix = "__NEOFS__" + attributeExpirationEpoch = sysAttributePrefix + "EXPIRATION_EPOCH" +) + +// Header groups meta information about particular NeoFS object required for +// system storage and data access. Each Header is associated with exactly one +// Object. +type Header struct { + version version.Version + container cid.ID + owner user.ID + attrs []*object.Header_Attribute + creationEpoch uint64 + session session.Object + // payload meta + typ Type + payloadSize uint64 + payloadChecksum checksum.Checksum + payloadHomoChecksum checksum.Checksum + // split-chain relations + splitFirst oid.ID + splitPrevious oid.ID + parentID oid.ID + parentSig neofscrypto.Signature + parentHdr *Header + splitID []byte // deprecated + children []oid.ID // deprecated + + verSet, cnrSet, ownerSet, sessionSet, csSet, csHomoSet, + splitFirstSet, splitPreviousSet, parentIDSet, parentSigSet bool +} + +// CopyTo writes deep copy of the Header to dst. 
+func (x Header) CopyTo(dst *Header) { + if dst.verSet = x.verSet; dst.verSet { + dst.version = x.version + } + if dst.cnrSet = x.cnrSet; dst.cnrSet { + dst.container = x.container + } + if dst.ownerSet = x.ownerSet; dst.ownerSet { + dst.owner = x.owner + } + if dst.sessionSet = x.sessionSet; dst.sessionSet { + x.session.CopyTo(&dst.session) + } + if dst.csSet = x.csSet; dst.csSet { + x.payloadChecksum.CopyTo(&dst.payloadChecksum) + } + if dst.csHomoSet = x.csHomoSet; dst.csHomoSet { + x.payloadHomoChecksum.CopyTo(&dst.payloadHomoChecksum) + } + if dst.splitFirstSet = x.splitFirstSet; dst.splitFirstSet { + dst.splitFirst = x.splitFirst + } + if dst.splitPreviousSet = x.splitPreviousSet; dst.splitPreviousSet { + dst.splitPrevious = x.splitPrevious + } + if dst.parentIDSet = x.parentIDSet; dst.parentIDSet { + dst.parentID = x.parentID + } + if dst.parentSigSet = x.parentSigSet; dst.parentSigSet { + x.parentSig.CopyTo(&dst.parentSig) + } + if x.parentHdr != nil { + dst.parentHdr = new(Header) + x.parentHdr.CopyTo(dst.parentHdr) + } else { + dst.parentHdr = nil + } + if x.attrs != nil { + dst.attrs = make([]*object.Header_Attribute, len(x.attrs)) + for i := range x.attrs { + if x.attrs[i] != nil { + dst.attrs[i] = &object.Header_Attribute{Key: x.attrs[i].Key, Value: x.attrs[i].Value} + } + } + } else { + dst.attrs = nil + } + if x.children != nil { + dst.children = make([]oid.ID, len(x.children)) + copy(dst.children, x.children) + } else { + dst.children = nil + } + dst.creationEpoch = x.creationEpoch + dst.payloadSize = x.payloadSize + dst.typ = x.typ + dst.splitID = bytes.Clone(x.splitID) +} + +func (x *Header) readFromV2(m *object.Header, checkFieldPresence bool) error { + if m.ObjectType < 0 { + return fmt.Errorf("invalid type field %d", m.ObjectType) + } + if x.cnrSet = m.ContainerId != nil; x.cnrSet { + if err := x.container.ReadFromV2(m.ContainerId); err != nil { + return fmt.Errorf("invalid container: %w", err) + } + } else if checkFieldPresence { + return 
errors.New("missing container") + } + if x.ownerSet = m.OwnerId != nil; x.ownerSet { + if err := x.owner.ReadFromV2(m.OwnerId); err != nil { + return fmt.Errorf("invalid owner: %w", err) + } + } else if checkFieldPresence { + return errors.New("missing owner") + } + if x.verSet = m.Version != nil; x.verSet { + if err := x.version.ReadFromV2(m.Version); err != nil { + return fmt.Errorf("invalid version: %w", err) + } + } + if x.csSet = m.PayloadHash != nil; x.csSet { + if err := x.payloadChecksum.ReadFromV2(m.PayloadHash); err != nil { + return fmt.Errorf("invalid payload checksum: %w", err) + } + } + if x.csHomoSet = m.HomomorphicHash != nil; x.csHomoSet { + if err := x.payloadHomoChecksum.ReadFromV2(m.HomomorphicHash); err != nil { + return fmt.Errorf("invalid payload homomorphic checksum: %w", err) + } + } + if x.sessionSet = m.SessionToken != nil; x.sessionSet { + if err := x.session.ReadFromV2(m.SessionToken); err != nil { + return fmt.Errorf("invalid session: %w", err) + } + } + if m.Split != nil { + if x.splitID = m.Split.SplitId; len(x.splitID) > 0 { + if len(x.splitID) != 16 { + return fmt.Errorf("invalid split-chain ID: wrong length %d", len(x.splitID)) + } + if ver := x.splitID[6] >> 4; ver != 4 { + return fmt.Errorf("invalid split-chain ID: wrong version #%d", ver) + } + } + if x.parentIDSet = m.Split.Parent != nil; x.parentIDSet { + if err := x.parentID.ReadFromV2(m.Split.Parent); err != nil { + return fmt.Errorf("invalid parent ID: %w", err) + } + } + if x.parentSigSet = m.Split.ParentSignature != nil; x.parentSigSet { + if err := x.parentSig.ReadFromV2(m.Split.ParentSignature); err != nil { + return fmt.Errorf("invalid parent signature: %w", err) + } + } + if x.splitPreviousSet = m.Split.Previous != nil; x.splitPreviousSet { + if err := x.splitPrevious.ReadFromV2(m.Split.Previous); err != nil { + return fmt.Errorf("invalid previous split-chain element: %w", err) + } + } + if x.splitFirstSet = m.Split.First != nil; x.splitFirstSet { + if err := 
x.splitFirst.ReadFromV2(m.Split.First); err != nil { + return fmt.Errorf("invalid first split-chain element: %w", err) + } + } + if len(m.Split.Children) > 0 { + x.children = make([]oid.ID, len(m.Split.Children)) + for i := range m.Split.Children { + if m.Split.Children[i] == nil { + return fmt.Errorf("nil child split-chain element #%d", i) + } + if err := x.children[i].ReadFromV2(m.Split.Children[i]); err != nil { + return fmt.Errorf("invalid child split-chain element #%d: %w", i, err) + } + } + } else { + x.children = nil + } + if m.Split.ParentHeader != nil { + if x.parentHdr == nil { + x.parentHdr = new(Header) + } + if err := x.parentHdr.readFromV2(m.Split.ParentHeader, checkFieldPresence); err != nil { + return fmt.Errorf("invalid parent header: %w", err) + } + } else { + x.parentHdr = nil + } + } + for i := range m.Attributes { + key := m.Attributes[i].GetKey() + if key == "" { + return fmt.Errorf("invalid attribute #%d: missing key", i) + } // also prevents further NPE + for j := 0; j < i; j++ { + if m.Attributes[j].Key == key { + return fmt.Errorf("multiple attributes with key=%s", key) + } + } + if m.Attributes[i].Value == "" { + return fmt.Errorf("invalid attribute #%d (%s): missing value", i, key) + } + switch key { + case attributeExpirationEpoch: + if _, err := strconv.ParseUint(m.Attributes[i].Value, 10, 64); err != nil { + return fmt.Errorf("invalid expiration attribute (#%d): invalid integer (%w)", i, err) + } + case attributeTimestamp: + if _, err := strconv.ParseInt(m.Attributes[i].Value, 10, 64); err != nil { + return fmt.Errorf("invalid timestamp attribute (#%d): invalid integer (%w)", i, err) + } + } + } + x.attrs = m.Attributes + x.typ = Type(m.ObjectType) + x.creationEpoch = m.CreationEpoch + x.payloadSize = m.PayloadLength + return nil +} + +// ReadFromV2 reads Header from the [object.Header] message. Returns an error if +// the message is malformed according to the NeoFS API V2 protocol. The message +// must not be nil. 
+// +// ReadFromV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. +// +// See also [Header.WriteToV2]. +func (x *Header) ReadFromV2(m *object.Header) error { + return x.readFromV2(m, true) +} + +// WriteToV2 writes Header to the [object.Header] message of the NeoFS API +// protocol. +// +// WriteToV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. +// +// See also [Header.ReadFromV2]. +func (x Header) WriteToV2(m *object.Header) { + if x.cnrSet { + m.ContainerId = new(refs.ContainerID) + x.container.WriteToV2(m.ContainerId) + } else { + m.ContainerId = nil + } + if x.ownerSet { + m.OwnerId = new(refs.OwnerID) + x.owner.WriteToV2(m.OwnerId) + } else { + m.OwnerId = nil + } + if x.verSet { + m.Version = new(refs.Version) + x.version.WriteToV2(m.Version) + } else { + m.Version = nil + } + if x.csSet { + m.PayloadHash = new(refs.Checksum) + x.payloadChecksum.WriteToV2(m.PayloadHash) + } else { + m.PayloadHash = nil + } + if x.csHomoSet { + m.HomomorphicHash = new(refs.Checksum) + x.payloadHomoChecksum.WriteToV2(m.HomomorphicHash) + } else { + m.HomomorphicHash = nil + } + if x.sessionSet { + m.SessionToken = new(apisession.SessionToken) + x.session.WriteToV2(m.SessionToken) + } else { + m.SessionToken = nil + } + if x.parentIDSet || x.splitPreviousSet || x.splitFirstSet || x.parentSigSet || x.parentHdr != nil || + len(x.splitID) > 0 || len(x.children) > 0 { + m.Split = &object.Header_Split{ + SplitId: x.splitID, + } + if x.parentIDSet { + m.Split.Parent = new(refs.ObjectID) + x.parentID.WriteToV2(m.Split.Parent) + } + if x.splitPreviousSet { + m.Split.Previous = new(refs.ObjectID) + x.splitPrevious.WriteToV2(m.Split.Previous) + } + if x.splitFirstSet { + m.Split.First = new(refs.ObjectID) + x.splitFirst.WriteToV2(m.Split.First) + } + if x.parentSigSet { + m.Split.ParentSignature = 
new(refs.Signature) + x.parentSig.WriteToV2(m.Split.ParentSignature) + } + if x.parentHdr != nil { + m.Split.ParentHeader = new(object.Header) + x.parentHdr.WriteToV2(m.Split.ParentHeader) + } + if len(x.children) > 0 { + m.Split.Children = make([]*refs.ObjectID, len(x.children)) + for i := range x.children { + m.Split.Children[i] = new(refs.ObjectID) + x.children[i].WriteToV2(m.Split.Children[i]) + } + } else { + m.Split.Children = nil + } + } else { + m.Split = nil + } + m.Attributes = x.attrs + m.ObjectType = object.ObjectType(x.typ) + m.CreationEpoch = x.creationEpoch + m.PayloadLength = x.payloadSize +} + +// MarshaledSize returns length of the Header encoded into the binary format of +// the NeoFS API protocol (Protocol Buffers V3 with direct field order). +// +// See also [Header.Marshal]. +func (x Header) MarshaledSize() int { + var m object.Header + x.WriteToV2(&m) + return m.MarshaledSize() +} + +// Marshal encodes Header into a binary format of the NeoFS API protocol +// (Protocol Buffers V3 with direct field order). +// +// See also [Header.MarshaledSize], [Header.Unmarshal]. +func (x Header) Marshal() []byte { + var m object.Header + x.WriteToV2(&m) + b := make([]byte, m.MarshaledSize()) + m.MarshalStable(b) + return b +} + +// Unmarshal decodes Protocol Buffers V3 binary data into the Header. Returns an +// error describing a format violation of the specified fields. Unmarshal does +// not check presence of the required fields and, at the same time, checks +// format of presented fields. +// +// See also [Header.Marshal]. +func (x *Header) Unmarshal(data []byte) error { + var m object.Header + err := proto.Unmarshal(data, &m) + if err != nil { + return fmt.Errorf("decode protobuf: %w", err) + } + return x.readFromV2(&m, false) +} + +// MarshalJSON encodes Header into a JSON format of the NeoFS API protocol +// (Protocol Buffers V3 JSON). +// +// See also [Header.UnmarshalJSON]. 
+func (x Header) MarshalJSON() ([]byte, error) { + var m object.Header + x.WriteToV2(&m) + return protojson.Marshal(&m) +} + +// UnmarshalJSON decodes NeoFS API protocol JSON data into the Header (Protocol +// Buffers V3 JSON). Returns an error describing a format violation. +// UnmarshalJSON does not check presence of the required fields and, at the same +// time, checks format of presented fields. +// +// See also [Header.MarshalJSON]. +func (x *Header) UnmarshalJSON(data []byte) error { + var m object.Header + err := protojson.Unmarshal(data, &m) + if err != nil { + return fmt.Errorf("decode protojson: %w", err) + } + return x.readFromV2(&m, false) +} + +// PayloadSize returns payload length of the object in bytes. Note that +// PayloadSize may differ from actual length of the object payload bytes. +// +// See also [Header.SetPayloadSize]. +func (x Header) PayloadSize() uint64 { + return x.payloadSize +} + +// SetPayloadSize sets payload length of the object in bytes. Note that +// SetPayloadSize does not affect actual object payload bytes. +// +// See also [Header.PayloadSize]. +func (x *Header) SetPayloadSize(v uint64) { + x.payloadSize = v +} + +// ContainerID returns identifier of the container with which the object is +// associated. Zero return indicates no binding. +// +// See also [Header.SetContainerID]. +func (x Header) ContainerID() cid.ID { + if x.cnrSet { + return x.container + } + return cid.ID{} +} + +// SetContainerID associates the object with the referenced container. +// +// See also [Header.ContainerID]. +func (x *Header) SetContainerID(v cid.ID) { + x.container, x.cnrSet = v, true +} + +// OwnerID returns identifier of the object owner. Zero return indicates no +// binding. +// +// See also [Header.SetOwnerID]. +func (x Header) OwnerID() user.ID { + if x.ownerSet { + return x.owner + } + return user.ID{} +} + +// SetOwnerID sets identifier of the object owner. +// +// See also [Header.OwnerID]. 
+func (x *Header) SetOwnerID(v user.ID) { + x.owner, x.ownerSet = v, true +} + +// CreationEpoch returns number of the NeoFS epoch when object was created. +// +// See also [Header.SetCreationEpoch]. +func (x Header) CreationEpoch() uint64 { + return x.creationEpoch +} + +// SetCreationEpoch sets number of the NeoFS epoch when object was created. +// +// See also [Header.CreationEpoch]. +func (x *Header) SetCreationEpoch(v uint64) { + x.creationEpoch = v +} + +// PayloadChecksum returns checksum of the object payload. Zero-type return +// indicates checksum absence. +// +// See also [Header.SetPayloadChecksum]. +func (x Header) PayloadChecksum() checksum.Checksum { + if x.csSet { + return x.payloadChecksum + } + return checksum.Checksum{} +} + +// SetPayloadChecksum sets checksum of the object payload. +// +// See also [Header.PayloadChecksum]. +func (x *Header) SetPayloadChecksum(v checksum.Checksum) { + x.payloadChecksum, x.csSet = v, true +} + +// PayloadHomomorphicChecksum returns homomorphic checksum of the object +// payload. Zero-type return indicates checksum absence. +// +// See also [Header.SetPayloadHomomorphicChecksum]. +func (x Header) PayloadHomomorphicChecksum() checksum.Checksum { + if x.csHomoSet { + return x.payloadHomoChecksum + } + return checksum.Checksum{} +} + +// SetPayloadHomomorphicChecksum sets homomorphic checksum of the object +// payload. +// +// See also [Header.PayloadHomomorphicChecksum]. +func (x *Header) SetPayloadHomomorphicChecksum(v checksum.Checksum) { + x.payloadHomoChecksum, x.csHomoSet = v, true +} + +// SetAttribute sets object attribute value by key. Both key and value MUST NOT +// be empty. Attributes set by the creator (owner) are most commonly ignored by +// the NeoFS system and used for application layer. Some attributes are +// so-called system or well-known attributes: they are reserved for system +// needs. System attributes SHOULD NOT be modified using SetAttribute, use +// corresponding methods/functions. 
List of the reserved keys is documented in +// the particular protocol version. +// +// SetAttribute overwrites existing attribute value. +// +// See also [Header.Attribute], [Header.IterateAttributes]. +func (x *Header) SetAttribute(key, value string) { + if key == "" { + panic("empty attribute key") + } else if value == "" { + panic("empty attribute value") + } + + for i := range x.attrs { + if x.attrs[i].GetKey() == key { + x.attrs[i].Value = value + return + } + } + + x.attrs = append(x.attrs, &object.Header_Attribute{Key: key, Value: value}) +} + +// Attribute reads value of the object attribute by key. Empty result means +// attribute absence. +// +// See also [Header.SetAttribute], [Header.IterateAttributes]. +func (x Header) Attribute(key string) string { + for i := range x.attrs { + if x.attrs[i].GetKey() == key { + return x.attrs[i].GetValue() + } + } + return "" +} + +// NumberOfAttributes returns number of all attributes specified for this +// object. +// +// See also [Header.SetAttribute], [Header.IterateAttributes]. +func (x Header) NumberOfAttributes() int { + return len(x.attrs) +} + +// IterateAttributes iterates over all object attributes and passes them into f. +// The handler MUST NOT be nil. +// +// See also [Header.SetAttribute], [Header.Attribute], +// [Header.NumberOfAttributes]. +func (x Header) IterateAttributes(f func(key, val string)) { + for i := range x.attrs { + f(x.attrs[i].GetKey(), x.attrs[i].GetValue()) + } +} + +// SetExpirationEpoch sets NeoFS epoch when the object becomes expired. By +// default, objects never expires. +// +// Reaction of NeoFS system components to the objects' 'expired' property may +// vary. For example, in the basic scenario, expired objects are auto-deleted +// from the storage. Detailed behavior can be found in the NeoFS Specification. 
+//
+// Note that the value determines exactly the last epoch of the object's
+// relevance: for example, with the value N, the object is relevant in epoch N
+// and expired in any epoch starting from N+1.
+func (x *Header) SetExpirationEpoch(epoch uint64) {
+	x.SetAttribute(attributeExpirationEpoch, strconv.FormatUint(epoch, 10))
+}
+
+// ExpirationEpoch returns last NeoFS epoch of the object lifetime. Zero return
+// means the object will never expire. For more details see
+// [Header.SetExpirationEpoch].
+func (x Header) ExpirationEpoch() uint64 {
+	var epoch uint64
+	if s := x.Attribute(attributeExpirationEpoch); s != "" {
+		var err error
+		epoch, err = strconv.ParseUint(s, 10, 64)
+		if err != nil {
+			// this could happen due to package developer only
+			panic(fmt.Errorf("parse expiration epoch attribute: %w", err))
+		}
+	}
+	return epoch
+}
+
+// PreviousSplitObject returns identifier of the object that is the previous
+// link in the split-chain of the common parent. Zero return indicates no
+// relation.
+//
+// See also [Header.SetPreviousSplitObject].
+func (x Header) PreviousSplitObject() oid.ID {
+	if x.splitPreviousSet {
+		return x.splitPrevious
+	}
+	return oid.ID{}
+}
+
+// SetPreviousSplitObject sets identifier of the object that is the previous
+// link in the split-chain of the common parent.
+//
+// See also [Header.PreviousSplitObject].
+func (x *Header) SetPreviousSplitObject(v oid.ID) {
+	x.splitPrevious, x.splitPreviousSet = v, true
+}
+
+// SetFirstSplitObject sets identifier of the object that is the first link in
+// the split-chain of the common parent.
+//
+// See also [Header.FirstSplitObject].
+func (x *Header) SetFirstSplitObject(id oid.ID) {
+	x.splitFirst, x.splitFirstSet = id, true
+}
+
+// FirstSplitObject returns identifier of the object that is the first link in
+// the split-chain of the common parent. Zero return indicates no relation.
+//
+// See also [Header.SetFirstSplitObject].
+func (x Header) FirstSplitObject() oid.ID {
+	if x.splitFirstSet {
+		return x.splitFirst
+	}
+	return oid.ID{}
+}
+
+// ParentID returns identifier of the parent object of which this object is the
+// split-chain part. Zero return indicates no relation.
+//
+// See also [Header.SetParentID].
+func (x Header) ParentID() oid.ID {
+	if x.parentIDSet {
+		return x.parentID
+	}
+	return oid.ID{}
+}
+
+// SetParentID sets identifier of the parent object of which this object is the
+// split-chain part.
+//
+// See also [Header.ParentID].
+func (x *Header) SetParentID(v oid.ID) {
+	x.parentID, x.parentIDSet = v, true
+}
+
+// ParentSignature returns signature of the parent object of which this object
+// is the split-chain part. Zero-scheme return indicates signature absence.
+//
+// See also [Header.SetParentSignature].
+func (x Header) ParentSignature() neofscrypto.Signature {
+	if x.parentSigSet {
+		return x.parentSig
+	}
+	return neofscrypto.Signature{}
+}
+
+// SetParentSignature sets signature of the parent object of which this object
+// is the split-chain part.
+//
+// See also [Header.ParentSignature].
+func (x *Header) SetParentSignature(sig neofscrypto.Signature) {
+	x.parentSig, x.parentSigSet = sig, true
+}
+
+// ParentHeader returns header of the parent object of which this object is the
+// split-chain part. Second value indicates parent header's presence.
+//
+// See also [Header.SetParentHeader].
+func (x Header) ParentHeader() (Header, bool) {
+	if x.parentHdr != nil {
+		return *x.parentHdr, true
+	}
+	return Header{}, false
+}
+
+// SetParentHeader sets header of the parent object of which this object is the
+// split-chain part.
+//
+// See also [Header.ParentHeader].
+func (x *Header) SetParentHeader(h Header) {
+	x.parentHdr = &h
+}
+
+// SessionToken returns token of the NeoFS session within which object was
+// created. Second value indicates session token presence.
+//
+// See also [Header.SetSessionToken].
+func (x Header) SessionToken() (session.Object, bool) { + return x.session, x.sessionSet +} + +// SetSessionToken sets token of the NeoFS session within which object was +// created. +// +// See also [Header.SessionToken]. +func (x *Header) SetSessionToken(t session.Object) { + x.session, x.sessionSet = t, true +} + +// Type returns object type indicating payload format. +// +// See also [Header.SetType]. +func (x Header) Type() Type { + return x.typ +} + +// SetType sets object type indicating payload format. +// +// See also [Header.Type]. +func (x *Header) SetType(v Type) { + x.typ = v +} + +// CalculateID calculates and returns CAS ID for the object with specified +// header. +func CalculateID(hdr Header) cid.ID { + return sha256.Sum256(hdr.Marshal()) +} diff --git a/object/id/address.go b/object/id/address.go index 1b07d0b62..185830807 100644 --- a/object/id/address.go +++ b/object/id/address.go @@ -5,15 +5,18 @@ import ( "fmt" "strings" - "github.com/nspcc-dev/neofs-api-go/v2/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" cid "github.com/nspcc-dev/neofs-sdk-go/container/id" + "google.golang.org/protobuf/encoding/protojson" ) // Address represents global object identifier in NeoFS network. Each object // belongs to exactly one container and is uniquely addressed within the container. // -// Address is mutually compatible with github.com/nspcc-dev/neofs-api-go/v2/refs.Address -// message. See ReadFromV2 / WriteToV2 methods. +// ID implements built-in comparable interface. +// +// Address is mutually compatible with [refs.Address] message. See +// [Address.ReadFromV2] / [Address.WriteToV2] methods. // // Instances can be created using built-in var declaration. type Address struct { @@ -22,27 +25,29 @@ type Address struct { obj ID } -// ReadFromV2 reads Address from the refs.Address message. Returns an error if -// the message is malformed according to the NeoFS API V2 protocol. +// ReadFromV2 reads ID from the [refs.Address] message. 
Returns an error if the +// message is malformed according to the NeoFS API V2 protocol. The message must +// not be nil. +// +// ReadFromV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. // -// See also WriteToV2. -func (x *Address) ReadFromV2(m refs.Address) error { - cnr := m.GetContainerID() - if cnr == nil { +// See also [Address.WriteToV2]. +func (x *Address) ReadFromV2(m *refs.Address) error { + if m.ContainerId == nil { return errors.New("missing container ID") } - obj := m.GetObjectID() - if obj == nil { + if m.ObjectId == nil { return errors.New("missing object ID") } - err := x.cnr.ReadFromV2(*cnr) + err := x.cnr.ReadFromV2(m.ContainerId) if err != nil { return fmt.Errorf("invalid container ID: %w", err) } - err = x.obj.ReadFromV2(*obj) + err = x.obj.ReadFromV2(m.ObjectId) if err != nil { return fmt.Errorf("invalid object ID: %w", err) } @@ -50,45 +55,43 @@ func (x *Address) ReadFromV2(m refs.Address) error { return nil } -// WriteToV2 writes Address to the refs.Address message. -// The message must not be nil. +// WriteToV2 writes ID to the [refs.Address] message of the NeoFS API +// protocol. +// +// WriteToV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. // -// See also ReadFromV2. +// See also [Address.ReadFromV2]. func (x Address) WriteToV2(m *refs.Address) { - var obj refs.ObjectID - x.obj.WriteToV2(&obj) - - var cnr refs.ContainerID - x.cnr.WriteToV2(&cnr) - - m.SetObjectID(&obj) - m.SetContainerID(&cnr) + m.ContainerId = new(refs.ContainerID) + x.cnr.WriteToV2(m.ContainerId) + m.ObjectId = new(refs.ObjectID) + x.obj.WriteToV2(m.ObjectId) } // MarshalJSON encodes Address into a JSON format of the NeoFS API protocol -// (Protocol Buffers JSON). +// (Protocol Buffers V3 JSON). // -// See also UnmarshalJSON. +// See also [Address.UnmarshalJSON]. 
func (x Address) MarshalJSON() ([]byte, error) { var m refs.Address x.WriteToV2(&m) - return m.MarshalJSON() + return protojson.Marshal(&m) } -// UnmarshalJSON decodes NeoFS API protocol JSON format into the Address -// (Protocol Buffers JSON). Returns an error describing a format violation. +// UnmarshalJSON decodes NeoFS API protocol JSON data into the Address (Protocol +// Buffers V3 JSON). Returns an error describing a format violation. // -// See also MarshalJSON. +// See also [Address.MarshalJSON]. func (x *Address) UnmarshalJSON(data []byte) error { var m refs.Address - - err := m.UnmarshalJSON(data) + err := protojson.Unmarshal(data, &m) if err != nil { - return err + return fmt.Errorf("decode protojson: %w", err) } - return x.ReadFromV2(m) + return x.ReadFromV2(&m) } // Container returns unique identifier of the NeoFS object container. @@ -96,33 +99,33 @@ func (x *Address) UnmarshalJSON(data []byte) error { // Zero Address has zero container ID, which is incorrect according to NeoFS // API protocol. // -// See also SetContainer. +// See also [Address.SetContainer]. func (x Address) Container() cid.ID { return x.cnr } // SetContainer sets unique identifier of the NeoFS object container. // -// See also Container. +// See also [Address.Container]. func (x *Address) SetContainer(id cid.ID) { x.cnr = id } -// Object returns unique identifier of the object in the container -// identified by Container(). +// Object returns unique identifier of the object in the container identified by +// [Address.Container]. // // Zero Address has zero object ID, which is incorrect according to NeoFS // API protocol. // -// See also SetObject. +// See also [Address.SetObject]. func (x Address) Object() ID { return x.obj } -// SetObject sets unique identifier of the object in the container -// identified by Container(). +// SetObject sets unique identifier of the object in the container identified by +// [Address.Container]. // -// See also Object. +// See also [Address.Object]. 
func (x *Address) SetObject(id ID) { x.obj = id } @@ -133,7 +136,7 @@ const idDelimiter = "/" // EncodeToString encodes Address into NeoFS API protocol string: concatenation // of the string-encoded container and object IDs delimited by a slash. // -// See also DecodeString. +// See also [Address.DecodeString]. func (x Address) EncodeToString() string { return x.cnr.EncodeToString() + "/" + x.obj.EncodeToString() } @@ -141,7 +144,7 @@ func (x Address) EncodeToString() string { // DecodeString decodes string into Address according to NeoFS API protocol. Returns // an error if s is malformed. // -// See also DecodeString. +// See also [Address.EncodeToString]. func (x *Address) DecodeString(s string) error { indDelimiter := strings.Index(s, idDelimiter) if indDelimiter < 0 { @@ -164,8 +167,8 @@ func (x *Address) DecodeString(s string) error { // String implements [fmt.Stringer]. // // String is designed to be human-readable, and its format MAY differ between -// SDK versions. String MAY return same result as EncodeToString. String MUST NOT -// be used to encode Address into NeoFS protocol string. +// SDK versions. String MAY return same result as [Address.EncodeToString]. +// String MUST NOT be used to encode Address into NeoFS protocol string. 
func (x Address) String() string { return x.EncodeToString() } diff --git a/object/id/address_test.go b/object/id/address_test.go index 3c47cdbdd..dadb29361 100644 --- a/object/id/address_test.go +++ b/object/id/address_test.go @@ -3,107 +3,324 @@ package oid_test import ( "testing" - "github.com/nspcc-dev/neofs-api-go/v2/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" oid "github.com/nspcc-dev/neofs-sdk-go/object/id" oidtest "github.com/nspcc-dev/neofs-sdk-go/object/id/test" "github.com/stretchr/testify/require" ) -func TestAddress_SetContainer(t *testing.T) { - var x oid.Address - - require.Zero(t, x.Container()) +func TestAddressComparable(t *testing.T) { + a1 := oidtest.Address() + require.True(t, a1 == a1) + a2 := oidtest.ChangeAddress(a1) + require.NotEqual(t, a1, a2) + require.False(t, a1 == a2) +} - cnr := cidtest.ID() +func TestAddress_ReadFromV2(t *testing.T) { + t.Run("missing fields", func(t *testing.T) { + t.Run("container", func(t *testing.T) { + a := oidtest.Address() + var m refs.Address + + a.WriteToV2(&m) + m.ContainerId = nil + require.ErrorContains(t, a.ReadFromV2(&m), "missing container ID") + }) + t.Run("object", func(t *testing.T) { + a := oidtest.Address() + var m refs.Address + + a.WriteToV2(&m) + m.ObjectId = nil + require.ErrorContains(t, a.ReadFromV2(&m), "missing object ID") + }) + }) + t.Run("invalid fields", func(t *testing.T) { + t.Run("container", func(t *testing.T) { + a := oidtest.Address() + var m refs.Address + + a.WriteToV2(&m) + m.ContainerId.Value = make([]byte, 31) + require.ErrorContains(t, a.ReadFromV2(&m), "invalid container ID: invalid value length 31") + m.ContainerId.Value = make([]byte, 33) + require.ErrorContains(t, a.ReadFromV2(&m), "invalid container ID: invalid value length 33") + }) + t.Run("object", func(t *testing.T) { + a := oidtest.Address() + var m refs.Address + + a.WriteToV2(&m) + m.ObjectId.Value = make([]byte, 31) + 
require.ErrorContains(t, a.ReadFromV2(&m), "invalid object ID: invalid value length 31") + m.ObjectId.Value = make([]byte, 33) + require.ErrorContains(t, a.ReadFromV2(&m), "invalid object ID: invalid value length 33") + }) + }) +} - x.SetContainer(cnr) - require.Equal(t, cnr, x.Container()) +func TestNodeInfo_UnmarshalJSON(t *testing.T) { + t.Run("invalid json", func(t *testing.T) { + var a oid.Address + msg := []byte("definitely_not_protojson") + err := a.UnmarshalJSON(msg) + require.ErrorContains(t, err, "decode protojson") + }) + t.Run("invalid fields", func(t *testing.T) { + testCases := []struct { + name string + err string + json string + }{{name: "missing container", err: "missing container ID", json: ` +{ + "objectID": { + "value": "a86CDbsGIuktRdOUdsUkdW4iaNsfjX5LwUncE1zHk+s=" + } +}`}, + {name: "missing object", err: "missing object ID", json: ` +{ + "containerID": { + "value": "lW85+mf1fm2JnD2P/sVrSijaf2U2G6v+PEYC6EeGk9s=" + } +} +`}, + {name: "invalid container length", err: "invalid container ID: invalid value length 31", json: ` +{ + "containerID": { + "value": "LfmuVsOC2bNfuxvuGrdnHwIM+QhDMO8eD22Vlgl2JQ==" + }, + "objectID": { + "value": "a86CDbsGIuktRdOUdsUkdW4iaNsfjX5LwUncE1zHk+s=" + } +}`}, + {name: "invalid object length", err: "invalid object ID: invalid value length 33", json: ` +{ + "containerID": { + "value": "lW85+mf1fm2JnD2P/sVrSijaf2U2G6v+PEYC6EeGk9s=" + }, + "objectID": { + "value": "3007wQX0PGK+/ERYq1Xj/Lg6qMj2jsnDorgzB/apoi6v" + } +}`}, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + var a oid.Address + require.ErrorContains(t, a.UnmarshalJSON([]byte(testCase.json)), testCase.err) + }) + } + }) } -func TestAddress_SetObject(t *testing.T) { - var x oid.Address +func TestAddress_DecodeString(t *testing.T) { + var a oid.Address + + const zeroAddrString = "11111111111111111111111111111111/11111111111111111111111111111111" + require.Equal(t, zeroAddrString, a.EncodeToString()) + a = 
oidtest.ChangeAddress(a) + require.NoError(t, a.DecodeString(zeroAddrString)) + require.Equal(t, zeroAddrString, a.EncodeToString()) + require.Zero(t, a) + + var bCnr = [32]byte{231, 129, 236, 104, 74, 71, 155, 100, 72, 209, 186, 80, 2, 184, 9, 161, 10, 76, 18, 203, 126, 94, 101, 42, 157, 211, 66, 99, 247, 143, 226, 23} + var bObj = [32]byte{67, 239, 220, 249, 222, 147, 14, 92, 52, 46, 242, 209, 101, 80, 248, 39, 206, 189, 29, 55, 8, 3, 70, 205, 213, 7, 46, 54, 192, 232, 35, 247} + const str = "Gai5pjZVewwmscQ5UczQbj2W8Wkh9d1BGUoRNzjR6QCN/5aCTDRcH248TManyLPJYkGnqsCGAfiyMw6rgvNCkHu98" + require.NoError(t, a.DecodeString(str)) + require.Equal(t, str, a.EncodeToString()) + require.EqualValues(t, bCnr, a.Container()) + require.EqualValues(t, bObj, a.Object()) + + var bCnrOther = [32]byte{190, 131, 185, 144, 207, 179, 2, 201, 93, 205, 169, 242, 167, 89, 56, 112, 48, 5, 13, 128, 58, 179, 92, 119, 37, 234, 236, 35, 9, 89, 73, 97} + var bObjOther = [32]byte{77, 244, 70, 159, 204, 190, 29, 22, 105, 203, 94, 30, 169, 236, 97, 176, 179, 51, 89, 138, 164, 69, 157, 131, 190, 246, 16, 93, 93, 249, 66, 95} + const strOther = "DpgxiqnrkpZzYwmT58AzY9w51V41P5dWNKuVGm7oeEak/6FJS2jh2cKmtHL54tQSREuJ3bUG2pkbvChJPyJ3ZchSW" + require.NoError(t, a.DecodeString(strOther)) + require.Equal(t, strOther, a.EncodeToString()) + require.EqualValues(t, bCnrOther, a.Container()) + require.EqualValues(t, bObjOther, a.Object()) + + t.Run("invalid", func(t *testing.T) { + var a oid.Address + for _, testCase := range []struct{ input, err string }{ + {input: "", err: "missing delimiter"}, + {input: "no_delimiter", err: "missing delimiter"}, + {input: "/Gai5pjZVewwmscQ5UczQbj2W8Wkh9d1BGUoRNzjR6QCN", err: "decode container string: invalid value length 0"}, + {input: "Gai5pjZVewwmscQ5UczQbj2W8Wkh9d1BGUoRNzjR6QCN/", err: "decode object string: invalid value length 0"}, + {input: "qxAE9SLuDq7dARPAFaWG6vbuGoocwoTn19LK5YVqnS/Gai5pjZVewwmscQ5UczQbj2W8Wkh9d1BGUoRNzjR6QCN", err: "decode container string: invalid 
value length 31"}, + {input: "Gai5pjZVewwmscQ5UczQbj2W8Wkh9d1BGUoRNzjR6QCN/qxAE9SLuDq7dARPAFaWG6vbuGoocwoTn19LK5YVqnS", err: "decode object string: invalid value length 31"}, + {input: "HJJEkEKthnvMw7NsZNgzBEQ4tf9AffmaBYWxfBULvvbPW/Gai5pjZVewwmscQ5UczQbj2W8Wkh9d1BGUoRNzjR6QCN", err: "decode container string: invalid value length 33"}, + {input: "Gai5pjZVewwmscQ5UczQbj2W8Wkh9d1BGUoRNzjR6QCN/HJJEkEKthnvMw7NsZNgzBEQ4tf9AffmaBYWxfBULvvbPW", err: "decode object string: invalid value length 33"}, + } { + require.ErrorContains(t, a.DecodeString(testCase.input), testCase.err, testCase) + } + }) + t.Run("encoding", func(t *testing.T) { + t.Run("api", func(t *testing.T) { + var src, dst oid.Address + var msg refs.Address + + require.NoError(t, dst.DecodeString(str)) + + src.WriteToV2(&msg) + require.Equal(t, make([]byte, 32), msg.ContainerId.Value) + require.Equal(t, make([]byte, 32), msg.ObjectId.Value) + require.NoError(t, dst.ReadFromV2(&msg)) + require.Zero(t, dst) + require.Equal(t, zeroAddrString, dst.EncodeToString()) + + require.NoError(t, src.DecodeString(str)) + + src.WriteToV2(&msg) + require.Equal(t, bCnr[:], msg.ContainerId.Value) + require.EqualValues(t, bObj[:], msg.ObjectId.Value) + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + require.EqualValues(t, bCnr, dst.Container()) + require.EqualValues(t, bObj, dst.Object()) + require.Equal(t, str, dst.EncodeToString()) + }) + t.Run("json", func(t *testing.T) { + var src, dst oid.Address + + require.NoError(t, dst.DecodeString(str)) + + j, err := src.MarshalJSON() + require.NoError(t, err) + require.NoError(t, dst.UnmarshalJSON(j)) + require.Zero(t, dst) + require.Equal(t, zeroAddrString, dst.EncodeToString()) + + require.NoError(t, src.DecodeString(str)) + + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.EqualValues(t, bCnr, dst.Container()) + require.EqualValues(t, bObj, dst.Object()) + require.Equal(t, str, dst.EncodeToString()) + }) 
+ }) +} - require.Zero(t, x.Object()) +func TestAddress_SetContainer(t *testing.T) { + var a oid.Address - obj := oidtest.ID() + require.Zero(t, a.Container()) - x.SetObject(obj) - require.Equal(t, obj, x.Object()) -} + cnr := cidtest.ID() + a.SetContainer(cnr) + require.Equal(t, cnr, a.Container()) -func TestAddress_ReadFromV2(t *testing.T) { - var x oid.Address - var xV2 refs.Address + cnrOther := cidtest.ChangeID(cnr) + a.SetContainer(cnrOther) + require.Equal(t, cnrOther, a.Container()) - require.Error(t, x.ReadFromV2(xV2)) + t.Run("encoding", func(t *testing.T) { + t.Run("api", func(t *testing.T) { + var src, dst oid.Address + var msg refs.Address - var cnrV2 refs.ContainerID - xV2.SetContainerID(&cnrV2) + // set required data just to satisfy decoder + src.SetObject(oidtest.ID()) - require.Error(t, x.ReadFromV2(xV2)) + dst.SetContainer(cnr) - cnr := cidtest.ID() - cnr.WriteToV2(&cnrV2) + src.WriteToV2(&msg) + require.Equal(t, make([]byte, 32), msg.ContainerId.Value) + require.NoError(t, dst.ReadFromV2(&msg)) + require.Zero(t, dst.Container()) - require.Error(t, x.ReadFromV2(xV2)) + src.SetContainer(cnr) - var objV2 refs.ObjectID - xV2.SetObjectID(&objV2) + src.WriteToV2(&msg) + require.Equal(t, cnr[:], msg.ContainerId.Value) + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + require.Equal(t, cnr, dst.Container()) + }) + t.Run("json", func(t *testing.T) { + var src, dst oid.Address - require.Error(t, x.ReadFromV2(xV2)) + // set required data just to satisfy decoder + src.SetObject(oidtest.ID()) - obj := oidtest.ID() - obj.WriteToV2(&objV2) + dst.SetContainer(cnr) - require.NoError(t, x.ReadFromV2(xV2)) - require.Equal(t, cnr, x.Container()) - require.Equal(t, obj, x.Object()) + j, err := src.MarshalJSON() + require.NoError(t, err) + require.NoError(t, dst.UnmarshalJSON(j)) + require.Zero(t, dst.Container()) - var xV2To refs.Address - x.WriteToV2(&xV2To) + src.SetContainer(cnr) - require.Equal(t, xV2, xV2To) + j, err = src.MarshalJSON() + 
require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.Equal(t, cnr, dst.Container()) + }) + }) } -func TestAddress_DecodeString(t *testing.T) { - var x, x2 oid.Address +func TestAddress_SetObject(t *testing.T) { + var a oid.Address - require.NoError(t, x2.DecodeString(x.EncodeToString())) - require.Equal(t, x, x2) + require.Zero(t, a.Object()) - cnr := cidtest.ID() obj := oidtest.ID() + a.SetObject(obj) + require.Equal(t, obj, a.Object()) - x.SetContainer(cnr) - x.SetObject(obj) + objOther := oidtest.ChangeID(obj) + a.SetObject(objOther) + require.Equal(t, objOther, a.Object()) - require.NoError(t, x2.DecodeString(x.EncodeToString())) - require.Equal(t, x, x2) + t.Run("encoding", func(t *testing.T) { + t.Run("api", func(t *testing.T) { + var src, dst oid.Address + var msg refs.Address - strCnr := cnr.EncodeToString() - strObj := obj.EncodeToString() + // set required data just to satisfy decoder + src.SetContainer(cidtest.ID()) - require.Error(t, x2.DecodeString("")) - require.Error(t, x2.DecodeString("/")) - require.Error(t, x2.DecodeString(strCnr)) - require.Error(t, x2.DecodeString(strCnr+"/")) - require.Error(t, x2.DecodeString("/"+strCnr)) - require.Error(t, x2.DecodeString(strCnr+strObj)) - require.Error(t, x2.DecodeString(strCnr+"\\"+strObj)) - require.NoError(t, x2.DecodeString(strCnr+"/"+strObj)) -} + dst.SetObject(obj) + + src.WriteToV2(&msg) + require.Equal(t, make([]byte, 32), msg.ObjectId.Value) + require.NoError(t, dst.ReadFromV2(&msg)) + require.Zero(t, dst.Object()) + + src.SetObject(obj) + + src.WriteToV2(&msg) + require.Equal(t, obj[:], msg.ObjectId.Value) + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + require.Equal(t, obj, dst.Object()) + }) + t.Run("json", func(t *testing.T) { + var src, dst oid.Address + + // set required data just to satisfy decoder + src.SetContainer(cidtest.ID()) -func TestAddressEncoding(t *testing.T) { - v := oidtest.Address() + dst.SetObject(obj) - t.Run("json", func(t 
*testing.T) { - data, err := v.MarshalJSON() - require.NoError(t, err) + j, err := src.MarshalJSON() + require.NoError(t, err) + require.NoError(t, dst.UnmarshalJSON(j)) + require.Zero(t, dst.Object()) - var v2 oid.Address - require.NoError(t, v2.UnmarshalJSON(data)) + src.SetObject(obj) - require.Equal(t, v, v2) + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.Equal(t, obj, dst.Object()) + }) }) } diff --git a/object/id/id.go b/object/id/id.go index c3436b56f..5541f9f53 100644 --- a/object/id/id.go +++ b/object/id/id.go @@ -2,92 +2,61 @@ package oid import ( "crypto/sha256" + "errors" "fmt" "github.com/mr-tron/base58" - "github.com/nspcc-dev/neofs-api-go/v2/refs" - neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" ) // ID represents NeoFS object identifier in a container. // -// ID is mutually compatible with github.com/nspcc-dev/neofs-api-go/v2/refs.ObjectID -// message. See ReadFromV2 / WriteToV2 methods. +// ID implements built-in comparable interface. // -// Instances can be created using built-in var declaration. -// -// Note that direct typecast is not safe and may result in loss of compatibility: +// ID is mutually compatible with [refs.ObjectID] message. See [ID.ReadFromV2] / +// [ID.WriteToV2] methods. // -// _ = ID([32]byte{}) // not recommended +// Instances can be created using built-in var declaration. type ID [sha256.Size]byte -// ReadFromV2 reads ID from the refs.ObjectID message. Returns an error if -// the message is malformed according to the NeoFS API V2 protocol. -// -// See also WriteToV2. -func (id *ID) ReadFromV2(m refs.ObjectID) error { - return id.Decode(m.GetValue()) -} - -// WriteToV2 writes ID to the refs.ObjectID message. -// The message must not be nil. -// -// See also ReadFromV2. -func (id ID) WriteToV2(m *refs.ObjectID) { - m.SetValue(id[:]) -} - -// Encode encodes ID into 32 bytes of dst. 
Panics if -// dst length is less than 32. -// -// Zero ID is all zeros. -// -// See also Decode. -func (id ID) Encode(dst []byte) { - if l := len(dst); l < sha256.Size { - panic(fmt.Sprintf("destination length is less than %d bytes: %d", sha256.Size, l)) +func (id *ID) decodeBinary(b []byte) error { + if len(b) != sha256.Size { + return fmt.Errorf("invalid value length %d", len(b)) } - - copy(dst, id[:]) + copy(id[:], b) + return nil } -// Decode decodes src bytes into ID. +// ReadFromV2 reads ID from the [refs.ObjectID] message. Returns an error if the +// message is malformed according to the NeoFS API V2 protocol. The message must +// not be nil. // -// Decode expects that src has 32 bytes length. If the input is malformed, -// Decode returns an error describing format violation. In this case ID -// remains unchanged. +// ReadFromV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. // -// Decode doesn't mutate src. -// -// See also Encode. -func (id *ID) Decode(src []byte) error { - if len(src) != 32 { - return fmt.Errorf("invalid length %d", len(src)) +// See also [ID.WriteToV2]. +func (id *ID) ReadFromV2(m *refs.ObjectID) error { + if len(m.Value) == 0 { + return errors.New("missing value field") } - - copy(id[:], src) - - return nil + return id.decodeBinary(m.Value) } -// SetSHA256 sets object identifier value to SHA256 checksum. -func (id *ID) SetSHA256(v [sha256.Size]byte) { - copy(id[:], v[:]) -} - -// Equals defines a comparison relation between two ID instances. +// WriteToV2 writes ID to the [refs.ObjectID] message of the NeoFS API protocol. +// +// WriteToV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. // -// Note that comparison using '==' operator is not recommended since it MAY result -// in loss of compatibility. 
-func (id ID) Equals(id2 ID) bool { - return id == id2 +// See also [ID.ReadFromV2]. +func (id ID) WriteToV2(m *refs.ObjectID) { + m.Value = id[:] } // EncodeToString encodes ID into NeoFS API protocol string. // // Zero ID is base58 encoding of 32 zeros. // -// See also DecodeString. +// See also [ID.DecodeString]. func (id ID) EncodeToString() string { return base58.Encode(id[:]) } @@ -95,73 +64,46 @@ func (id ID) EncodeToString() string { // DecodeString decodes string into ID according to NeoFS API protocol. Returns // an error if s is malformed. // -// See also DecodeString. +// See also [ID.EncodeToString]. func (id *ID) DecodeString(s string) error { - data, err := base58.Decode(s) - if err != nil { - return fmt.Errorf("decode base58: %w", err) + var b []byte + if s != "" { + var err error + b, err = base58.Decode(s) + if err != nil { + return fmt.Errorf("decode base58: %w", err) + } } - - return id.Decode(data) + return id.decodeBinary(b) } // String implements [fmt.Stringer]. // // String is designed to be human-readable, and its format MAY differ between -// SDK versions. String MAY return same result as EncodeToString. String MUST NOT -// be used to encode ID into NeoFS protocol string. +// SDK versions. String MAY return same result as [ID.EncodeToString]. String +// MUST NOT be used to encode ID into NeoFS protocol string. func (id ID) String() string { return id.EncodeToString() } -// CalculateIDSignature signs object id with provided key. -func (id ID) CalculateIDSignature(signer neofscrypto.Signer) (neofscrypto.Signature, error) { - data, err := id.Marshal() - if err != nil { - return neofscrypto.Signature{}, fmt.Errorf("marshal ID: %w", err) - } - - var sig neofscrypto.Signature - - return sig, sig.Calculate(signer, data) -} - -// Marshal marshals ID into a protobuf binary form. 
-func (id ID) Marshal() ([]byte, error) { - var v2 refs.ObjectID - v2.SetValue(id[:]) - - return v2.StableMarshal(nil), nil -} - -// Unmarshal unmarshals protobuf binary representation of ID. -func (id *ID) Unmarshal(data []byte) error { - var v2 refs.ObjectID - if err := v2.Unmarshal(data); err != nil { - return err - } - - copy(id[:], v2.GetValue()) - - return nil -} - -// MarshalJSON encodes ID to protobuf JSON format. -func (id ID) MarshalJSON() ([]byte, error) { - var v2 refs.ObjectID - v2.SetValue(id[:]) - - return v2.MarshalJSON() +// Marshal encodes ID into a binary format of the NeoFS API protocol +// (Protocol Buffers with direct field order). +// +// See also [ID.Unmarshal]. +func (id ID) Marshal() []byte { + var m refs.ObjectID + id.WriteToV2(&m) + b := make([]byte, m.MarshaledSize()) + m.MarshalStable(b) + return b } -// UnmarshalJSON decodes ID from protobuf JSON format. -func (id *ID) UnmarshalJSON(data []byte) error { - var v2 refs.ObjectID - if err := v2.UnmarshalJSON(data); err != nil { - return err +// IsZero checks whether ID is zero. 
+func (id ID) IsZero() bool { + for i := range id { + if id[i] != 0 { + return false + } } - - copy(id[:], v2.GetValue()) - - return nil + return true } diff --git a/object/id/id_test.go b/object/id/id_test.go index baa1b221d..8130b64af 100644 --- a/object/id/id_test.go +++ b/object/id/id_test.go @@ -1,182 +1,128 @@ -package oid +package oid_test import ( - "crypto/rand" - "crypto/sha256" - "strconv" "testing" - "github.com/mr-tron/base58" - "github.com/nspcc-dev/neofs-api-go/v2/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + oid "github.com/nspcc-dev/neofs-sdk-go/object/id" + oidtest "github.com/nspcc-dev/neofs-sdk-go/object/id/test" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" ) -const emptyID = "11111111111111111111111111111111" - -func randID(t *testing.T) ID { - var id ID - id.SetSHA256(randSHA256Checksum(t)) - - return id -} - -func randSHA256Checksum(t *testing.T) (cs [sha256.Size]byte) { - _, err := rand.Read(cs[:]) - require.NoError(t, err) - - return -} - -func TestIDV2(t *testing.T) { - var id ID - - checksum := [sha256.Size]byte{} - - _, err := rand.Read(checksum[:]) - require.NoError(t, err) - - id.SetSHA256(checksum) - - var idV2 refs.ObjectID - id.WriteToV2(&idV2) - - require.Equal(t, checksum[:], idV2.GetValue()) +func TestIDComparable(t *testing.T) { + id1 := oidtest.ID() + require.True(t, id1 == id1) + id2 := oidtest.ChangeID(id1) + require.NotEqual(t, id1, id2) + require.False(t, id1 == id2) } -func TestID_Equal(t *testing.T) { - cs := randSHA256Checksum(t) - - var id1 ID - id1.SetSHA256(cs) - - var id2 ID - id2.SetSHA256(cs) - - var id3 ID - id3.SetSHA256(randSHA256Checksum(t)) - - require.True(t, id1.Equals(id2)) - require.False(t, id1.Equals(id3)) -} - -func TestID_Parse(t *testing.T) { - t.Run("should parse successful", func(t *testing.T) { - for i := 0; i < 10; i++ { - t.Run(strconv.Itoa(i), func(t *testing.T) { - cs := randSHA256Checksum(t) - str := base58.Encode(cs[:]) - var oid ID - - 
require.NoError(t, oid.DecodeString(str)) - - var oidV2 refs.ObjectID - oid.WriteToV2(&oidV2) - - require.Equal(t, cs[:], oidV2.GetValue()) - }) - } +func TestID_ReadFromV2(t *testing.T) { + t.Run("missing fields", func(t *testing.T) { + t.Run("value", func(t *testing.T) { + id := oidtest.ID() + var m refs.ObjectID + + id.WriteToV2(&m) + m.Value = nil + require.ErrorContains(t, id.ReadFromV2(&m), "missing value field") + m.Value = []byte{} + require.ErrorContains(t, id.ReadFromV2(&m), "missing value field") + }) }) - - t.Run("should failure on parse", func(t *testing.T) { - for i := 0; i < 10; i++ { - j := i - t.Run(strconv.Itoa(j), func(t *testing.T) { - cs := []byte{1, 2, 3, 4, 5, byte(j)} - str := base58.Encode(cs) - var oid ID - - require.Error(t, oid.DecodeString(str)) - }) - } + t.Run("invalid fields", func(t *testing.T) { + t.Run("value", func(t *testing.T) { + id := oidtest.ID() + var m refs.ObjectID + + id.WriteToV2(&m) + m.Value = make([]byte, 31) + require.ErrorContains(t, id.ReadFromV2(&m), "invalid value length 31") + m.Value = make([]byte, 33) + require.ErrorContains(t, id.ReadFromV2(&m), "invalid value length 33") + }) }) } -func TestID_String(t *testing.T) { - t.Run("zero", func(t *testing.T) { - var id ID - require.Equal(t, emptyID, id.EncodeToString()) - }) - - t.Run("should be equal", func(t *testing.T) { - for i := 0; i < 10; i++ { - t.Run(strconv.Itoa(i), func(t *testing.T) { - cs := randSHA256Checksum(t) - str := base58.Encode(cs[:]) - var oid ID - - require.NoError(t, oid.DecodeString(str)) - require.Equal(t, str, oid.EncodeToString()) - }) +func TestID_DecodeString(t *testing.T) { + var id oid.ID + + const zeroIDString = "11111111111111111111111111111111" + require.Equal(t, zeroIDString, id.EncodeToString()) + id = oidtest.ChangeID(id) + require.NoError(t, id.DecodeString(zeroIDString)) + require.Equal(t, zeroIDString, id.EncodeToString()) + require.Zero(t, id) + + var bin = [32]byte{231, 129, 236, 104, 74, 71, 155, 100, 72, 209, 186, 80, 2, 
184, 9, 161, 10, 76, 18, 203, 126, 94, 101, 42, 157, 211, 66, 99, 247, 143, 226, 23} + const str = "Gai5pjZVewwmscQ5UczQbj2W8Wkh9d1BGUoRNzjR6QCN" + require.NoError(t, id.DecodeString(str)) + require.Equal(t, str, id.EncodeToString()) + require.EqualValues(t, bin, id) + + var binOther = [32]byte{216, 146, 23, 99, 156, 90, 232, 244, 202, 213, 0, 92, 22, 194, 164, 150, 233, 163, 175, 199, 187, 45, 65, 7, 190, 124, 77, 99, 8, 172, 36, 112} + const strOther = "FaQGU3PHuHjhHbce1u8AuHuabx4Ra9CxREsMcZffXwM1" + require.NoError(t, id.DecodeString(strOther)) + require.Equal(t, strOther, id.EncodeToString()) + require.EqualValues(t, binOther, id) + + t.Run("invalid", func(t *testing.T) { + var id oid.ID + for _, testCase := range []struct{ input, err string }{ + {input: "not_a_base58_string", err: "decode base58"}, + {input: "", err: "invalid value length 0"}, + {input: "qxAE9SLuDq7dARPAFaWG6vbuGoocwoTn19LK5YVqnS", err: "invalid value length 31"}, + {input: "HJJEkEKthnvMw7NsZNgzBEQ4tf9AffmaBYWxfBULvvbPW", err: "invalid value length 33"}, + } { + require.ErrorContains(t, id.DecodeString(testCase.input), testCase.err, testCase) } }) -} - -func TestObjectIDEncoding(t *testing.T) { - id := randID(t) - - t.Run("binary", func(t *testing.T) { - data, err := id.Marshal() - require.NoError(t, err) - - var id2 ID - require.NoError(t, id2.Unmarshal(data)) - - require.Equal(t, id, id2) - }) + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src oid.ID + var msg refs.ObjectID - t.Run("json", func(t *testing.T) { - data, err := id.MarshalJSON() - require.NoError(t, err) + msg.Value = []byte("any") - var id2 ID - require.NoError(t, id2.UnmarshalJSON(data)) + require.NoError(t, proto.Unmarshal(src.Marshal(), &msg)) + require.Equal(t, make([]byte, 32), msg.Value) - require.Equal(t, id, id2) - }) -} + require.NoError(t, src.DecodeString(str)) -func TestNewIDFromV2(t *testing.T) { - t.Run("from zero", func(t *testing.T) { - var ( - x ID - v2 refs.ObjectID - ) - 
- require.Error(t, x.ReadFromV2(v2)) - }) -} - -func TestID_ToV2(t *testing.T) { - t.Run("zero to v2", func(t *testing.T) { - var ( - x ID - v2 refs.ObjectID - ) - - x.WriteToV2(&v2) - - require.Equal(t, sha256.Size, len(v2.GetValue())) - require.Equal(t, emptyID, base58.Encode(v2.GetValue())) - }) -} - -func TestID_Encode(t *testing.T) { - var id ID - - t.Run("panic", func(t *testing.T) { - dst := make([]byte, sha256.Size-1) - - require.Panics(t, func() { - id.Encode(dst) + require.NoError(t, proto.Unmarshal(src.Marshal(), &msg)) + require.Equal(t, bin[:], msg.Value) }) - }) - - t.Run("correct", func(t *testing.T) { - dst := make([]byte, sha256.Size) - - require.NotPanics(t, func() { - id.Encode(dst) + t.Run("api", func(t *testing.T) { + var src, dst oid.ID + var msg refs.ObjectID + + require.NoError(t, dst.DecodeString(str)) + + src.WriteToV2(&msg) + require.Equal(t, make([]byte, 32), msg.Value) + require.NoError(t, dst.ReadFromV2(&msg)) + require.Zero(t, dst) + require.Equal(t, zeroIDString, dst.EncodeToString()) + + require.NoError(t, src.DecodeString(str)) + + src.WriteToV2(&msg) + require.Equal(t, bin[:], msg.Value) + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + require.EqualValues(t, bin, dst) + require.Equal(t, str, dst.EncodeToString()) }) - require.Equal(t, emptyID, id.EncodeToString()) }) } + +func TestID_IsZero(t *testing.T) { + var id oid.ID + require.True(t, id.IsZero()) + for i := range id { + id2 := id + id2[i]++ + require.False(t, id2.IsZero()) + } +} diff --git a/object/id/test/generate.go b/object/id/test/generate.go index 75fa07800..db6658d9b 100644 --- a/object/id/test/generate.go +++ b/object/id/test/generate.go @@ -1,7 +1,6 @@ package oidtest import ( - "crypto/sha256" "math/rand" cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" @@ -10,22 +9,27 @@ import ( // ID returns random oid.ID. 
func ID() oid.ID { - checksum := [sha256.Size]byte{} + var res oid.ID //nolint:staticcheck - rand.Read(checksum[:]) - - return idWithChecksum(checksum) + rand.Read(res[:]) + return res } -// idWithChecksum returns oid.ID initialized -// with specified checksum. -func idWithChecksum(cs [sha256.Size]byte) oid.ID { - var id oid.ID - id.SetSHA256(cs) - +// ChangeID returns object ID other than the given one. +func ChangeID(id oid.ID) oid.ID { + id[0]++ return id } +// NIDs returns n random oid.ID instances. +func NIDs(n int) []oid.ID { + res := make([]oid.ID, n) + for i := range res { + res[i] = ID() + } + return res +} + // Address returns random oid.Address. func Address() oid.Address { var x oid.Address @@ -35,3 +39,20 @@ func Address() oid.Address { return x } + +// ChangeAddress returns object address other than the given one. +func ChangeAddress(addr oid.Address) oid.Address { + var res oid.Address + res.SetObject(ChangeID(addr.Object())) + res.SetContainer(cidtest.ChangeID(addr.Container())) + return res +} + +// NAddresses returns n random oid.Address instances. 
+func NAddresses(n int) []oid.Address { + res := make([]oid.Address, n) + for i := range res { + res[i] = Address() + } + return res +} diff --git a/object/id/test/generate_test.go b/object/id/test/generate_test.go new file mode 100644 index 000000000..fa87a9c35 --- /dev/null +++ b/object/id/test/generate_test.go @@ -0,0 +1,51 @@ +package oidtest_test + +import ( + "math/rand" + "testing" + + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + oid "github.com/nspcc-dev/neofs-sdk-go/object/id" + oidtest "github.com/nspcc-dev/neofs-sdk-go/object/id/test" + "github.com/stretchr/testify/require" +) + +func TestID(t *testing.T) { + id := oidtest.ID() + require.NotEqual(t, id, oidtest.ID()) + + var m refs.ObjectID + id.WriteToV2(&m) + var id2 oid.ID + require.NoError(t, id2.ReadFromV2(&m)) +} + +func TestChangeID(t *testing.T) { + id := oidtest.ID() + require.NotEqual(t, id, oidtest.ChangeID(id)) +} + +func TestNIDs(t *testing.T) { + n := rand.Int() % 10 + require.Len(t, oidtest.NIDs(n), n) +} + +func TestAddress(t *testing.T) { + a := oidtest.Address() + require.NotEqual(t, a, oidtest.Address()) + + var m refs.Address + a.WriteToV2(&m) + var id2 oid.Address + require.NoError(t, id2.ReadFromV2(&m)) +} + +func TestChangeAddress(t *testing.T) { + a := oidtest.Address() + require.NotEqual(t, a, oidtest.ChangeAddress(a)) +} + +func TestNAddresses(t *testing.T) { + n := rand.Int() % 10 + require.Len(t, oidtest.NAddresses(n), n) +} diff --git a/object/link.go b/object/link.go deleted file mode 100644 index 46114785e..000000000 --- a/object/link.go +++ /dev/null @@ -1,110 +0,0 @@ -package object - -import ( - v2object "github.com/nspcc-dev/neofs-api-go/v2/object" - "github.com/nspcc-dev/neofs-api-go/v2/refs" - oid "github.com/nspcc-dev/neofs-sdk-go/object/id" -) - -// Link is a payload of helper objects that contain the full list of the split -// chain of the big NeoFS objects. It is compatible with NeoFS API V2 protocol. 
-// -// Link instance can be written to the [Object], see -// [Object.WriteLink]/[Object.ReadLink]. -type Link v2object.Link - -// WriteLink writes a link to the Object, and sets its type to [TypeLink]. -// -// See also ReadLink. -func (o *Object) WriteLink(l Link) { - o.SetType(TypeLink) - o.SetPayload(l.Marshal()) -} - -// ReadLink reads a link from the [Object]. The link must not be nil. -// Returns an error describing incorrect format. Makes sense only -// if the object has [TypeLink] type. -// -// See also [Object.WriteLink]. -func (o *Object) ReadLink(l *Link) error { - return l.Unmarshal(o.Payload()) -} - -// Marshal encodes the [Link] into a NeoFS protocol binary format. -// -// See also [Link.Unmarshal]. -func (l *Link) Marshal() []byte { - return (*v2object.Link)(l).StableMarshal(nil) -} - -// Unmarshal decodes the [Link] from its NeoFS protocol binary representation. -// -// See also [Link.Marshal]. -func (l *Link) Unmarshal(data []byte) error { - return (*v2object.Link)(l).Unmarshal(data) -} - -// MeasuredObject groups object ID and its size length. It is compatible with -// NeoFS API V2 protocol. -type MeasuredObject v2object.MeasuredObject - -// SetObjectID sets object identifier. -// -// See also [MeasuredObject.ObjectID]. -func (m *MeasuredObject) SetObjectID(id oid.ID) { - var idV2 refs.ObjectID - id.WriteToV2(&idV2) - - m.ID = idV2 -} - -// ObjectID returns object identifier. -// -// See also [MeasuredObject.SetObjectID]. -func (m *MeasuredObject) ObjectID() oid.ID { - var id oid.ID - _ = id.ReadFromV2(m.ID) - - return id -} - -// SetObjectSize sets size of the object. -// -// See also [MeasuredObject.ObjectSize]. -func (m *MeasuredObject) SetObjectSize(s uint32) { - m.Size = s -} - -// ObjectSize returns size of the object. -// -// See also [MeasuredObject.SetObjectSize]. -func (m *MeasuredObject) ObjectSize() uint32 { - return m.Size -} - -// Objects returns split chain's measured objects. -// -// See also [Link.SetObjects]. 
-func (l *Link) Objects() []MeasuredObject { - res := make([]MeasuredObject, (*v2object.Link)(l).NumberOfChildren()) - var i int - - (*v2object.Link)(l).IterateChildren(func(object v2object.MeasuredObject) { - res[i] = MeasuredObject(object) - i++ - }) - - return res -} - -// SetObjects sets split chain's measured objects. -// -// See also [Link.Objects]. -func (l *Link) SetObjects(oo []MeasuredObject) { - v2OO := make([]v2object.MeasuredObject, len(oo)) - for i, o := range oo { - v2OO[i] = v2object.MeasuredObject(o) - } - - (*v2object.Link)(l).SetChildren(v2OO) -} diff --git a/object/link_test.go b/object/link_test.go deleted file mode 100644 index 410fb599d..000000000 --- a/object/link_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package object_test - -import ( - "testing" - - "github.com/nspcc-dev/neofs-sdk-go/object" - objecttest "github.com/nspcc-dev/neofs-sdk-go/object/test" - "github.com/stretchr/testify/require" -) - -func TestLinkEncoding(t *testing.T) { - link := *objecttest.Link() - - t.Run("binary", func(t *testing.T) { - data := link.Marshal() - - var link2 object.Link - require.NoError(t, link2.Unmarshal(data)) - - require.Equal(t, link, link2) - }) -} - -func TestWriteLink(t *testing.T) { - link := objecttest.Link() - var link2 object.Link - var o object.Object - - o.WriteLink(*link) - - require.NoError(t, o.ReadLink(&link2)) - require.Equal(t, *link, link2) - - // corrupt payload - o.Payload()[0]++ - - require.Error(t, o.ReadLink(&link2)) -} diff --git a/object/lock.go b/object/lock.go index a3972899d..85bebd5dc 100644 --- a/object/lock.go +++ b/object/lock.go @@ -1,78 +1,103 @@ package object import ( - v2object "github.com/nspcc-dev/neofs-api-go/v2/object" - "github.com/nspcc-dev/neofs-api-go/v2/refs" + "errors" + "fmt" + + "github.com/nspcc-dev/neofs-sdk-go/api/lock" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" oid "github.com/nspcc-dev/neofs-sdk-go/object/id" + "google.golang.org/protobuf/proto" ) -// Lock represents record with locked objects. 
 It is compatible with
-// NeoFS API V2 protocol.
-//
-// Lock instance can be written to the [Object], see WriteLock/ReadLock.
-type Lock v2object.Lock
-
-// WriteLock writes [Lock] to the [Object], and sets its type to [TypeLock].
-//
-// See also ReadLock.
-func (o *Object) WriteLock(l Lock) {
-	o.SetType(TypeLock)
-	o.SetPayload(l.Marshal())
+// Lock represents record with locked objects, i.e. objects protected from
+// deletion. Lock is stored and transmitted as payload of system NeoFS
+// objects.
+type Lock struct {
+	list []oid.ID
 }

-// ReadLock reads [Lock] from the [Object]. The lock must not be nil.
-// Returns an error describing incorrect format. Makes sense only
-// if object has [TypeLock] type.
+// readFromV2 reads Lock from the lock.Lock message. Returns an error if the
+// message is malformed according to the NeoFS API V2 protocol. The message must
+// not be nil.
 //
-// See also [Object.WriteLock].
-func (o *Object) ReadLock(l *Lock) error {
-	return l.Unmarshal(o.Payload())
-}
-
-// NumberOfMembers returns number of members in lock list.
-func (x Lock) NumberOfMembers() int {
-	return (*v2object.Lock)(&x).NumberOfMembers()
-}
-
-// ReadMembers reads list of locked members.
+// ReadFromV2 is intended to be used by the NeoFS API V2 client/server
+// implementation only and is not expected to be directly used by applications.
 //
-// Buffer length must not be less than [Lock.NumberOfMembers].
-func (x Lock) ReadMembers(buf []oid.ID) {
-	var i int
+func (x *Lock) readFromV2(m *lock.Lock) error {
+	if len(m.Members) == 0 {
+		return errors.New("missing members")
+	}
+
+	x.list = make([]oid.ID, len(m.Members))
+	for i := range m.Members {
+		err := x.list[i].ReadFromV2(m.Members[i])
+		if err != nil {
+			return fmt.Errorf("invalid member #%d: %w", i, err)
+		}
+	}

-	(*v2object.Lock)(&x).IterateMembers(func(idV2 refs.ObjectID) {
-		_ = buf[i].ReadFromV2(idV2)
-		i++
-	})
+	return nil
 }

-// WriteMembers writes list of locked members.
+// writeToV2 writes Lock to the lock.Lock message of the NeoFS API protocol.
 //
-// See also [Lock.ReadMembers].
-func (x *Lock) WriteMembers(ids []oid.ID) {
-	var members []refs.ObjectID
-
-	if ids != nil {
-		members = make([]refs.ObjectID, len(ids))
-
-		for i := range ids {
-			ids[i].WriteToV2(&members[i])
+// WriteToV2 is intended to be used by the NeoFS API V2 client/server
+// implementation only and is not expected to be directly used by applications.
+//
+// See also readFromV2.
+func (x Lock) writeToV2(m *lock.Lock) {
+	if len(x.list) > 0 {
+		m.Members = make([]*refs.ObjectID, len(x.list))
+		for i := range x.list {
+			m.Members[i] = new(refs.ObjectID)
+			x.list[i].WriteToV2(m.Members[i])
 		}
+	} else {
+		m.Members = nil
 	}
+}

-	(*v2object.Lock)(x).SetMembers(members)
+// List returns list of locked objects.
+//
+// See also [Lock.SetList].
+func (x Lock) List() []oid.ID {
+	return x.list
 }

-// Marshal encodes the [Lock] into a NeoFS protocol binary format.
+// SetList sets list of locked objects.
+//
+// See also [Lock.List].
+func (x *Lock) SetList(ids []oid.ID) {
+	x.list = ids
+}
+
+// Marshal encodes Lock into a Protocol Buffers V3 binary format.
 //
 // See also [Lock.Unmarshal].
 func (x Lock) Marshal() []byte {
-	return (*v2object.Lock)(&x).StableMarshal(nil)
+	var m lock.Lock
+	x.writeToV2(&m)
+
+	b, err := proto.Marshal(&m)
+	if err != nil {
+		// while it is bad to panic on external package return, we can do nothing better
+		// for this case: how can a normal message not be encoded?
+ panic(fmt.Errorf("unexpected marshal protobuf message failure: %w", err)) + } + return b } -// Unmarshal decodes the [Lock] from its NeoFS protocol binary representation. +// Unmarshal decodes Protocol Buffers V3 binary data into the Lock. Returns an +// error if the message is malformed according to the NeoFS API V2 protocol. // // See also [Lock.Marshal]. func (x *Lock) Unmarshal(data []byte) error { - return (*v2object.Lock)(x).Unmarshal(data) + var m lock.Lock + err := proto.Unmarshal(data, &m) + if err != nil { + return fmt.Errorf("decode protobuf: %w", err) + } + return x.readFromV2(&m) } diff --git a/object/lock_test.go b/object/lock_test.go index 72fec4501..4886d3219 100644 --- a/object/lock_test.go +++ b/object/lock_test.go @@ -1,39 +1,3 @@ package object_test -import ( - "testing" - - "github.com/nspcc-dev/neofs-sdk-go/object" - objecttest "github.com/nspcc-dev/neofs-sdk-go/object/test" - "github.com/stretchr/testify/require" -) - -func TestLockEncoding(t *testing.T) { - l := *objecttest.Lock() - - t.Run("binary", func(t *testing.T) { - data := l.Marshal() - - var l2 object.Lock - require.NoError(t, l2.Unmarshal(data)) - - require.Equal(t, l, l2) - }) -} - -func TestWriteLock(t *testing.T) { - l := *objecttest.Lock() - var o object.Object - - o.WriteLock(l) - - var l2 object.Lock - - require.NoError(t, o.ReadLock(&l2)) - require.Equal(t, l, l2) - - // corrupt payload - o.Payload()[0]++ - - require.Error(t, o.ReadLock(&l2)) -} +// TODO diff --git a/object/object.go b/object/object.go index ca5b03a9d..9dc5a8508 100644 --- a/object/object.go +++ b/object/object.go @@ -2,792 +2,271 @@ package object import ( "bytes" - "errors" + "crypto/sha256" "fmt" - "strings" - "github.com/nspcc-dev/neofs-api-go/v2/object" - "github.com/nspcc-dev/neofs-api-go/v2/refs" - v2session "github.com/nspcc-dev/neofs-api-go/v2/session" - "github.com/nspcc-dev/neofs-sdk-go/checksum" + "github.com/nspcc-dev/neofs-sdk-go/api/object" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" cid 
"github.com/nspcc-dev/neofs-sdk-go/container/id" neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" oid "github.com/nspcc-dev/neofs-sdk-go/object/id" - "github.com/nspcc-dev/neofs-sdk-go/session" "github.com/nspcc-dev/neofs-sdk-go/user" "github.com/nspcc-dev/neofs-sdk-go/version" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" ) -// Object represents in-memory structure of the NeoFS object. -// Type is compatible with NeoFS API V2 protocol. -// -// Instance can be created depending on scenario: -// - [Object.InitCreation] (an object to be placed in container); -// - New (blank instance, usually needed for decoding); -// - NewFromV2 (when working under NeoFS API V2 protocol). -type Object object.Object - -// RequiredFields contains the minimum set of object data that must be set -// by the NeoFS user at the stage of creation. -type RequiredFields struct { - // Identifier of the NeoFS container associated with the object. - Container cid.ID - - // Object owner's user ID in the NeoFS system. - Owner user.ID +// common interface of [Object] and [Header] allowing to use any of them. +type objectOrHeader interface { + Attribute(string) string } -// InitCreation initializes the object instance with minimum set of required fields. -func (o *Object) InitCreation(rf RequiredFields) { - o.SetContainerID(rf.Container) - o.SetOwnerID(&rf.Owner) -} - -// NewFromV2 wraps v2 [object.Object] message to [Object]. -func NewFromV2(oV2 *object.Object) *Object { - return (*Object)(oV2) -} - -// New creates and initializes blank [Object]. -// -// Works similar as NewFromV2(new(Object)). -func New() *Object { - return NewFromV2(new(object.Object)) +// common interface of [*Object] and [*Header] allowing to use any of them. +type objectOrHeaderPtr interface { + objectOrHeader + SetAttribute(string, string) } -// ToV2 converts [Object] to v2 [object.Object] message. 
-// -// The value returned shares memory with the structure itself, so changing it can lead to data corruption. -// Make a copy if you need to change it. -func (o *Object) ToV2() *object.Object { - return (*object.Object)(o) -} - -// CopyTo writes deep copy of the [Object] to dst. -func (o Object) CopyTo(dst *Object) { - id := (*object.Object)(&o).GetObjectID() - (*object.Object)(dst).SetObjectID(copyObjectID(id)) - - sig := (*object.Object)(&o).GetSignature() - (*object.Object)(dst).SetSignature(copySignature(sig)) - - header := (*object.Object)(&o).GetHeader() - (*object.Object)(dst).SetHeader(copyHeader(header)) - - dst.SetPayload(bytes.Clone(o.Payload())) -} - -// MarshalHeaderJSON marshals object's header into JSON format. -func (o *Object) MarshalHeaderJSON() ([]byte, error) { - return (*object.Object)(o).GetHeader().MarshalJSON() -} - -func (o *Object) setHeaderField(setter func(*object.Header)) { - obj := (*object.Object)(o) - h := obj.GetHeader() - - if h == nil { - h = new(object.Header) - obj.SetHeader(h) - } - - setter(h) -} - -func (o *Object) setSplitFields(setter func(*object.SplitHeader)) { - o.setHeaderField(func(h *object.Header) { - split := h.GetSplit() - if split == nil { - split = new(object.SplitHeader) - h.SetSplit(split) - } - - setter(split) - }) -} - -// ID returns object identifier. +// Object represents in-memory structure of the NeoFS object. // -// See also [Object.SetID]. -func (o *Object) ID() (v oid.ID, isSet bool) { - v2 := (*object.Object)(o) - if id := v2.GetObjectID(); id != nil { - err := v.ReadFromV2(*v2.GetObjectID()) - isSet = (err == nil) - } +// Object is mutually compatible with [object.Object] message. See +// [Object.ReadFromV2] / [Object.WriteToV2] methods. +type Object struct { + Header - return -} + id oid.ID + sig neofscrypto.Signature + payload []byte -// SetID sets object identifier. -// -// See also [Object.ID]. 
-func (o *Object) SetID(v oid.ID) { - var v2 refs.ObjectID - v.WriteToV2(&v2) - - (*object.Object)(o). - SetObjectID(&v2) + idSet, sigSet bool } -// ResetID removes object identifier. -// -// See also [Object.SetID]. -func (o *Object) ResetID() { - (*object.Object)(o). - SetObjectID(nil) +// New constructs new Object owned by the specified user and associated with +// particular container. +func New(cnr cid.ID, owner user.ID) Object { + var obj Object + obj.version, obj.verSet = version.Current, true + obj.SetContainerID(cnr) + obj.SetOwnerID(owner) + return obj } -// Signature returns signature of the object identifier. -// -// See also [Object.SetSignature]. -func (o *Object) Signature() *neofscrypto.Signature { - sigv2 := (*object.Object)(o).GetSignature() - if sigv2 == nil { - return nil +// CopyTo writes deep copy of the [Object] to dst. +func (o Object) CopyTo(dst *Object) { + o.Header.CopyTo(&dst.Header) + if dst.idSet = o.idSet; dst.idSet { + dst.id = o.id } - - var sig neofscrypto.Signature - if err := sig.ReadFromV2(*sigv2); err != nil { - return nil + if dst.sigSet = o.sigSet; dst.sigSet { + o.sig.CopyTo(&dst.sig) } - - return &sig + dst.payload = bytes.Clone(o.Payload()) } -// SetSignature sets signature of the object identifier. -// -// See also [Object.Signature]. -func (o *Object) SetSignature(v *neofscrypto.Signature) { - var sigv2 *refs.Signature - - if v != nil { - sigv2 = new(refs.Signature) - - v.WriteToV2(sigv2) - } - - (*object.Object)(o).SetSignature(sigv2) -} - -// Payload returns payload bytes. -// -// The value returned shares memory with the structure itself, so changing it can lead to data corruption. -// Make a copy if you need to change it. -// -// See also [Object.SetPayload]. -func (o *Object) Payload() []byte { - return (*object.Object)(o).GetPayload() -} - -// SetPayload sets payload bytes. -// -// See also [Object.Payload]. 
-func (o *Object) SetPayload(v []byte) { - (*object.Object)(o).SetPayload(v) -} - -// Version returns version of the object. -// -// See also [Object.SetVersion]. -func (o *Object) Version() *version.Version { - var ver version.Version - if verV2 := (*object.Object)(o).GetHeader().GetVersion(); verV2 != nil { - if err := ver.ReadFromV2(*verV2); err != nil { - return nil +func (o *Object) readFromV2(m *object.Object, checkFieldPresence bool) error { + if m.Header != nil { + if err := o.Header.readFromV2(m.Header, checkFieldPresence); err != nil { + return fmt.Errorf("invalid header: %w", err) } + } else { + o.Header = Header{} } - return &ver -} - -// SetVersion sets version of the object. -// -// See also [Object.Version]. -func (o *Object) SetVersion(v *version.Version) { - var verV2 refs.Version - v.WriteToV2(&verV2) - - o.setHeaderField(func(h *object.Header) { - h.SetVersion(&verV2) - }) -} - -// PayloadSize returns payload length of the object. -// -// See also [Object.SetPayloadSize]. -func (o *Object) PayloadSize() uint64 { - return (*object.Object)(o). - GetHeader(). - GetPayloadLength() -} - -// SetPayloadSize sets payload length of the object. -// -// See also [Object.PayloadSize]. -func (o *Object) SetPayloadSize(v uint64) { - o.setHeaderField(func(h *object.Header) { - h.SetPayloadLength(v) - }) -} - -// ContainerID returns identifier of the related container. -// -// See also [Object.SetContainerID]. -func (o *Object) ContainerID() (v cid.ID, isSet bool) { - v2 := (*object.Object)(o) - - cidV2 := v2.GetHeader().GetContainerID() - if cidV2 != nil { - err := v.ReadFromV2(*cidV2) - isSet = (err == nil) + if o.idSet = m.ObjectId != nil; o.idSet { + if err := o.id.ReadFromV2(m.ObjectId); err != nil { + return fmt.Errorf("invalid ID: %w", err) + } } - - return -} - -// SetContainerID sets identifier of the related container. -// -// See also [Object.ContainerID]. 
-func (o *Object) SetContainerID(v cid.ID) { - var cidV2 refs.ContainerID - v.WriteToV2(&cidV2) - - o.setHeaderField(func(h *object.Header) { - h.SetContainerID(&cidV2) - }) -} - -// OwnerID returns identifier of the object owner. -// -// See also [Object.SetOwnerID]. -func (o *Object) OwnerID() *user.ID { - var id user.ID - - m := (*object.Object)(o).GetHeader().GetOwnerID() - if m != nil { - _ = id.ReadFromV2(*m) + if o.sigSet = m.Signature != nil; o.sigSet { + if err := o.sig.ReadFromV2(m.Signature); err != nil { + return fmt.Errorf("invalid signature: %w", err) + } } - - return &id -} - -// SetOwnerID sets identifier of the object owner. -// -// See also [Object.OwnerID]. -func (o *Object) SetOwnerID(v *user.ID) { - o.setHeaderField(func(h *object.Header) { - var m refs.OwnerID - v.WriteToV2(&m) - - h.SetOwnerID(&m) - }) + o.payload = m.Payload + return nil } -// CreationEpoch returns epoch number in which object was created. +// ReadFromV2 reads Object from the object.Object message. Returns an error if +// the message is malformed according to the NeoFS API V2 protocol. The message +// must not be nil. // -// See also [Object.SetCreationEpoch]. -func (o *Object) CreationEpoch() uint64 { - return (*object.Object)(o). - GetHeader(). - GetCreationEpoch() -} - -// SetCreationEpoch sets epoch number in which object was created. +// ReadFromV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. // -// See also [Object.CreationEpoch]. -func (o *Object) SetCreationEpoch(v uint64) { - o.setHeaderField(func(h *object.Header) { - h.SetCreationEpoch(v) - }) +// See also [Object.WriteToV2]. +func (o *Object) ReadFromV2(m *object.Object) error { + return o.readFromV2(m, true) } -// PayloadChecksum returns checksum of the object payload and -// bool that indicates checksum presence in the object. +// WriteToV2 writes Object to the object.Object message of the NeoFS API +// protocol. 
// -// Zero [Object] does not have payload checksum. +// WriteToV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. // -// See also [Object.SetPayloadChecksum]. -func (o *Object) PayloadChecksum() (checksum.Checksum, bool) { - var v checksum.Checksum - v2 := (*object.Object)(o) - - if hash := v2.GetHeader().GetPayloadHash(); hash != nil { - err := v.ReadFromV2(*hash) - return v, (err == nil) +// See also [Object.ReadFromV2]. +func (o Object) WriteToV2(m *object.Object) { + if o.idSet { + m.ObjectId = new(refs.ObjectID) + o.id.WriteToV2(m.ObjectId) + } else { + m.ObjectId = nil } - - return v, false -} - -// SetPayloadChecksum sets checksum of the object payload. -// -// See also [Object.PayloadChecksum]. -func (o *Object) SetPayloadChecksum(v checksum.Checksum) { - var v2 refs.Checksum - v.WriteToV2(&v2) - - o.setHeaderField(func(h *object.Header) { - h.SetPayloadHash(&v2) - }) -} - -// PayloadHomomorphicHash returns homomorphic hash of the object -// payload and bool that indicates checksum presence in the object. -// -// Zero [Object] does not have payload homomorphic checksum. -// -// See also [Object.SetPayloadHomomorphicHash]. -func (o *Object) PayloadHomomorphicHash() (checksum.Checksum, bool) { - var v checksum.Checksum - v2 := (*object.Object)(o) - - if hash := v2.GetHeader().GetHomomorphicHash(); hash != nil { - err := v.ReadFromV2(*hash) - return v, (err == nil) + if o.sigSet { + m.Signature = new(refs.Signature) + o.sig.WriteToV2(m.Signature) } - - return v, false -} - -// SetPayloadHomomorphicHash sets homomorphic hash of the object payload. -// -// See also [Object.PayloadHomomorphicHash]. 
-func (o *Object) SetPayloadHomomorphicHash(v checksum.Checksum) { - var v2 refs.Checksum - v.WriteToV2(&v2) - - o.setHeaderField(func(h *object.Header) { - h.SetHomomorphicHash(&v2) - }) + // the header may be zero, but this is cumbersome to check, and the pointer to + // an empty structure does not differ from nil when transmitted + m.Header = new(object.Header) + o.Header.WriteToV2(m.Header) + m.Payload = o.payload } -// Attributes returns all object attributes. -// -// The value returned shares memory with the structure itself, so changing it can lead to data corruption. -// Make a copy if you need to change it. +// MarshaledSize returns length of the Object encoded into the binary format of +// the NeoFS API protocol (Protocol Buffers V3 with direct field order). // -// See also [Object.SetAttributes], [Object.UserAttributes]. -func (o *Object) Attributes() []Attribute { - attrs := (*object.Object)(o). - GetHeader(). - GetAttributes() - - res := make([]Attribute, len(attrs)) - - for i := range attrs { - res[i] = *NewAttributeFromV2(&attrs[i]) - } - - return res +// See also [Object.Marshal]. +func (o Object) MarshaledSize() int { + var m object.Object + o.WriteToV2(&m) + return m.MarshaledSize() } -// UserAttributes returns user attributes of the Object. -// -// The value returned shares memory with the structure itself, so changing it can lead to data corruption. -// Make a copy if you need to change it. +// Marshal encodes Object into a binary format of the NeoFS API protocol +// (Protocol Buffers V3 with direct field order). // -// See also [Object.SetAttributes], [Object.Attributes]. -func (o *Object) UserAttributes() []Attribute { - attrs := (*object.Object)(o). - GetHeader(). - GetAttributes() - - res := make([]Attribute, 0, len(attrs)) - - for i := range attrs { - if !strings.HasPrefix(attrs[i].GetKey(), object.SysAttributePrefix) { - res = append(res, *NewAttributeFromV2(&attrs[i])) - } - } - - return res -} - -// SetAttributes sets object attributes. 
-func (o *Object) SetAttributes(v ...Attribute) { - attrs := make([]object.Attribute, len(v)) - - for i := range v { - attrs[i] = *v[i].ToV2() - } - - o.setHeaderField(func(h *object.Header) { - h.SetAttributes(attrs) - }) +// See also [Object.Unmarshal]. +func (o Object) Marshal() []byte { + var m object.Object + o.WriteToV2(&m) + b := make([]byte, m.MarshaledSize()) + m.MarshalStable(b) + return b } -// PreviousID returns identifier of the previous sibling object. +// Unmarshal decodes Protocol Buffers V3 binary data into the Object. Returns an +// error describing a format violation of the specified fields. Unmarshal does +// not check presence of the required fields and, at the same time, checks +// format of presented fields. // -// See also [Object.SetPreviousID]. -func (o *Object) PreviousID() (v oid.ID, isSet bool) { - v2 := (*object.Object)(o) - - v2Prev := v2.GetHeader().GetSplit().GetPrevious() - if v2Prev != nil { - err := v.ReadFromV2(*v2Prev) - isSet = (err == nil) +// See also [Object.Marshal]. +func (o *Object) Unmarshal(data []byte) error { + var m object.Object + err := proto.Unmarshal(data, &m) + if err != nil { + return fmt.Errorf("decode protobuf: %w", err) } - - return + return o.readFromV2(&m, false) } -// ResetPreviousID resets identifier of the previous sibling object. +// MarshalJSON encodes Object into a JSON format of the NeoFS API protocol +// (Protocol Buffers V3 JSON). // -// See also [Object.SetPreviousID], [Object.PreviousID]. -func (o *Object) ResetPreviousID() { - o.setSplitFields(func(split *object.SplitHeader) { - split.SetPrevious(nil) - }) -} - -// SetPreviousID sets identifier of the previous sibling object. -// -// See also [Object.PreviousID]. -func (o *Object) SetPreviousID(v oid.ID) { - var v2 refs.ObjectID - v.WriteToV2(&v2) - - o.setSplitFields(func(split *object.SplitHeader) { - split.SetPrevious(&v2) - }) +// See also [Object.UnmarshalJSON]. 
+func (o Object) MarshalJSON() ([]byte, error) { + var m object.Object + o.WriteToV2(&m) + return protojson.Marshal(&m) } -// Children return list of the identifiers of the child objects. +// UnmarshalJSON decodes NeoFS API protocol JSON data into the Object (Protocol +// Buffers V3 JSON). Returns an error describing a format violation. +// UnmarshalJSON does not check presence of the required fields and, at the same +// time, checks format of presented fields. // -// See also [Object.SetChildren]. -func (o *Object) Children() []oid.ID { - v2 := (*object.Object)(o) - ids := v2.GetHeader().GetSplit().GetChildren() - - var ( - id oid.ID - res = make([]oid.ID, len(ids)) - ) - - for i := range ids { - _ = id.ReadFromV2(ids[i]) - res[i] = id +// See also [Object.MarshalJSON]. +func (o *Object) UnmarshalJSON(data []byte) error { + var m object.Object + err := protojson.Unmarshal(data, &m) + if err != nil { + return fmt.Errorf("decode protojson: %w", err) } - - return res + return o.readFromV2(&m, false) } -// SetChildren sets list of the identifiers of the child objects. +// ID returns object identifier. Zero return indicates ID absence. // -// See also [Object.Children]. -func (o *Object) SetChildren(v ...oid.ID) { - var ( - v2 refs.ObjectID - ids = make([]refs.ObjectID, len(v)) - ) - - for i := range v { - v[i].WriteToV2(&v2) - ids[i] = v2 +// See also [Object.SetID]. +func (o Object) ID() oid.ID { + if o.idSet { + return o.id } - - o.setSplitFields(func(split *object.SplitHeader) { - split.SetChildren(ids) - }) + return oid.ID{} } -// SetFirstID sets the first part's ID of the object's -// split chain. +// SetID sets object identifier. // -// See also [Object.FirstID]. -func (o *Object) SetFirstID(id oid.ID) { - var v2 refs.ObjectID - id.WriteToV2(&v2) - - o.setSplitFields(func(split *object.SplitHeader) { - split.SetFirst(&v2) - }) +// See also [Object.ID], [Object.ResetID]. 
+func (o *Object) SetID(v oid.ID) { + o.id, o.idSet = v, true } -// FirstID returns the first part of the object's split chain. +// Signature returns object signature. Zero-scheme return indicates signature +// absence. // -// See also [Object.SetFirstID]. -func (o *Object) FirstID() (v oid.ID, isSet bool) { - v2 := (*object.Object)(o) - - v2First := v2.GetHeader().GetSplit().GetFirst() - if v2First != nil { - err := v.ReadFromV2(*v2First) - isSet = (err == nil) +// See also [Object.SetSignature]. +func (o Object) Signature() neofscrypto.Signature { + if o.sigSet { + return o.sig } - - return + return neofscrypto.Signature{} } -// SplitID return split identity of split object. If object is not split returns nil. +// SetSignature sets object signature. // -// See also [Object.SetSplitID]. -func (o *Object) SplitID() *SplitID { - return NewSplitIDFromV2( - (*object.Object)(o). - GetHeader(). - GetSplit(). - GetSplitID(), - ) +// See also [Object.Signature], [Sign], [VerifySignature]. +func (o *Object) SetSignature(sig neofscrypto.Signature) { + o.sig, o.sigSet = sig, true } -// SetSplitID sets split identifier for the split object. +// SignedData returns signed data of the given Object. // -// See also [Object.SplitID]. -func (o *Object) SetSplitID(id *SplitID) { - o.setSplitFields(func(split *object.SplitHeader) { - split.SetSplitID(id.ToV2()) - }) -} - -// ParentID returns identifier of the parent object. -// -// See also [Object.SetParentID]. -func (o *Object) ParentID() (v oid.ID, isSet bool) { - v2 := (*object.Object)(o) - - v2Par := v2.GetHeader().GetSplit().GetParent() - if v2Par != nil { - err := v.ReadFromV2(*v2Par) - isSet = (err == nil) +// See also [Sign]. +func SignedData(obj Object) []byte { + var m refs.ObjectID + id := obj.ID() + if id != [sha256.Size]byte{} { + id.WriteToV2(&m) } - - return + b := make([]byte, m.MarshaledSize()) + m.MarshalStable(b) + return b } -// SetParentID sets identifier of the parent object. 
+// Sign calculates and sets signature of the given object using provided signer. // -// See also [Object.ParentID]. -func (o *Object) SetParentID(v oid.ID) { - var v2 refs.ObjectID - v.WriteToV2(&v2) - - o.setSplitFields(func(split *object.SplitHeader) { - split.SetParent(&v2) - }) -} - -// ResetParentID removes identifier of the parent object. -// -// See also [Object.SetParentID]. -func (o *Object) ResetParentID() { - o.setSplitFields(func(split *object.SplitHeader) { - split.SetParent(nil) - }) -} - -// Parent returns parent object w/o payload. -// -// See also [Object.SetParent]. -func (o *Object) Parent() *Object { - h := (*object.Object)(o). - GetHeader(). - GetSplit() - - parSig := h.GetParentSignature() - parHdr := h.GetParentHeader() - - if parSig == nil && parHdr == nil { - return nil +// See also [VerifySignature], [Object.SetSignature]. +func Sign(obj *Object, signer neofscrypto.Signer) error { + var sig neofscrypto.Signature + err := sig.Calculate(signer, SignedData(*obj)) + if err == nil { + obj.SetSignature(sig) } - - oV2 := new(object.Object) - oV2.SetObjectID(h.GetParent()) - oV2.SetSignature(parSig) - oV2.SetHeader(parHdr) - - return NewFromV2(oV2) -} - -// SetParent sets parent object w/o payload. -// -// See also [Object.Parent]. -func (o *Object) SetParent(v *Object) { - o.setSplitFields(func(split *object.SplitHeader) { - split.SetParent((*object.Object)(v).GetObjectID()) - split.SetParentSignature((*object.Object)(v).GetSignature()) - split.SetParentHeader((*object.Object)(v).GetHeader()) - }) -} - -func (o *Object) initRelations() { - o.setHeaderField(func(h *object.Header) { - h.SetSplit(new(object.SplitHeader)) - }) + return err } -func (o *Object) resetRelations() { - o.setHeaderField(func(h *object.Header) { - h.SetSplit(nil) - }) -} - -// SessionToken returns token of the session within which object was created. +// VerifySignature checks whether signature of the given object is presented and +// valid. 
// -// See also [Object.SetSessionToken]. -func (o *Object) SessionToken() *session.Object { - tokv2 := (*object.Object)(o).GetHeader().GetSessionToken() - if tokv2 == nil { - return nil - } - - var res session.Object - - _ = res.ReadFromV2(*tokv2) - - return &res +// See also [Sign], [Object.Signature]. +func VerifySignature(obj Object) bool { + sig := obj.Signature() + return sig.Scheme() != 0 && sig.Verify(SignedData(obj)) } -// SetSessionToken sets token of the session within which object was created. -// -// See also [Object.SessionToken]. -func (o *Object) SetSessionToken(v *session.Object) { - o.setHeaderField(func(h *object.Header) { - var tokv2 *v2session.Token - - if v != nil { - tokv2 = new(v2session.Token) - v.WriteToV2(tokv2) - } - - h.SetSessionToken(tokv2) - }) -} - -// Type returns type of the object. -// -// See also [Object.SetType]. -func (o *Object) Type() Type { - return TypeFromV2( - (*object.Object)(o). - GetHeader(). - GetObjectType(), - ) -} - -// SetType sets type of the object. -// -// See also [Object.Type]. -func (o *Object) SetType(v Type) { - o.setHeaderField(func(h *object.Header) { - h.SetObjectType(v.ToV2()) - }) -} - -// CutPayload returns [Object] w/ empty payload. +// Payload returns payload bytes. // // The value returned shares memory with the structure itself, so changing it can lead to data corruption. // Make a copy if you need to change it. -func (o *Object) CutPayload() *Object { - ov2 := new(object.Object) - *ov2 = *(*object.Object)(o) - ov2.SetPayload(nil) - - return (*Object)(ov2) -} - -// HasParent checks if parent (split ID) is present. -func (o *Object) HasParent() bool { - return (*object.Object)(o). - GetHeader(). - GetSplit() != nil -} - -// ResetRelations removes all fields of links with other objects. -func (o *Object) ResetRelations() { - o.resetRelations() -} - -// InitRelations initializes relation field. 
-func (o *Object) InitRelations() { - o.initRelations() -} - -// Marshal marshals object into a protobuf binary form. -// -// See also [Object.Unmarshal]. -func (o *Object) Marshal() ([]byte, error) { - return (*object.Object)(o).StableMarshal(nil), nil -} - -// Unmarshal unmarshals protobuf binary representation of object. -// -// See also [Object.Marshal]. -func (o *Object) Unmarshal(data []byte) error { - err := (*object.Object)(o).Unmarshal(data) - if err != nil { - return err - } - - return formatCheck((*object.Object)(o)) -} - -// MarshalJSON encodes object to protobuf JSON format. // -// See also [Object.UnmarshalJSON]. -func (o *Object) MarshalJSON() ([]byte, error) { - return (*object.Object)(o).MarshalJSON() +// See also [Object.SetPayload]. +func (o Object) Payload() []byte { + return o.payload } -// UnmarshalJSON decodes object from protobuf JSON format. +// SetPayload sets payload bytes. // -// See also [Object.MarshalJSON]. -func (o *Object) UnmarshalJSON(data []byte) error { - err := (*object.Object)(o).UnmarshalJSON(data) - if err != nil { - return err - } - - return formatCheck((*object.Object)(o)) -} - -var errOIDNotSet = errors.New("object ID is not set") -var errCIDNotSet = errors.New("container ID is not set") - -func formatCheck(v2 *object.Object) error { - var ( - oID oid.ID - cID cid.ID - ) - - oidV2 := v2.GetObjectID() - if oidV2 == nil { - return errOIDNotSet - } - - err := oID.ReadFromV2(*oidV2) - if err != nil { - return fmt.Errorf("could not convert V2 object ID: %w", err) - } - - cidV2 := v2.GetHeader().GetContainerID() - if cidV2 == nil { - return errCIDNotSet - } - - err = cID.ReadFromV2(*cidV2) - if err != nil { - return fmt.Errorf("could not convert V2 container ID: %w", err) - } - - if prev := v2.GetHeader().GetSplit().GetPrevious(); prev != nil { - err = oID.ReadFromV2(*prev) - if err != nil { - return fmt.Errorf("could not convert previous object ID: %w", err) - } - } - - if parent := v2.GetHeader().GetSplit().GetParent(); 
parent != nil { - err = oID.ReadFromV2(*parent) - if err != nil { - return fmt.Errorf("could not convert parent object ID: %w", err) - } - } - - return nil -} - -// HeaderLen returns length of the binary header. -func (o *Object) HeaderLen() int { - return (*object.Object)(o).GetHeader().StableSize() +// See also [Object.Payload]. +func (o *Object) SetPayload(v []byte) { + o.payload = v } diff --git a/object/object_copy.go b/object/object_copy.go deleted file mode 100644 index 9771a4765..000000000 --- a/object/object_copy.go +++ /dev/null @@ -1,173 +0,0 @@ -package object - -import ( - "bytes" - - "github.com/nspcc-dev/neofs-api-go/v2/object" - "github.com/nspcc-dev/neofs-api-go/v2/refs" - v2session "github.com/nspcc-dev/neofs-api-go/v2/session" -) - -func copyObjectID(id *refs.ObjectID) *refs.ObjectID { - if id == nil { - return nil - } - - var newID refs.ObjectID - newID.SetValue(bytes.Clone(id.GetValue())) - - return &newID -} - -func copySignature(sig *refs.Signature) *refs.Signature { - if sig == nil { - return nil - } - - var newSig refs.Signature - newSig.SetScheme(sig.GetScheme()) - newSig.SetKey(bytes.Clone(sig.GetKey())) - newSig.SetSign(bytes.Clone(sig.GetSign())) - - return &newSig -} - -func copySession(session *v2session.Token) *v2session.Token { - if session == nil { - return nil - } - - var newSession v2session.Token - if body := session.GetBody(); body != nil { - var newBody v2session.TokenBody - newBody.SetID(bytes.Clone(body.GetID())) - - if ownerID := body.GetOwnerID(); ownerID != nil { - var newOwnerID refs.OwnerID - newOwnerID.SetValue(bytes.Clone(ownerID.GetValue())) - - newBody.SetOwnerID(&newOwnerID) - } else { - newBody.SetOwnerID(nil) - } - - if lifetime := body.GetLifetime(); lifetime != nil { - newLifetime := *lifetime - newBody.SetLifetime(&newLifetime) - } else { - newBody.SetLifetime(nil) - } - - newBody.SetSessionKey(bytes.Clone(body.GetSessionKey())) - - // it is an interface. 
Both implementations do nothing inside implemented functions. - newBody.SetContext(body.GetContext()) - - newSession.SetBody(&newBody) - } else { - newSession.SetBody(nil) - } - - newSession.SetSignature(copySignature(session.GetSignature())) - - return &newSession -} - -func copySplitHeader(spl *object.SplitHeader) *object.SplitHeader { - if spl == nil { - return nil - } - - var newSpl object.SplitHeader - - newSpl.SetParent(copyObjectID(spl.GetParent())) - newSpl.SetPrevious(copyObjectID(spl.GetPrevious())) - newSpl.SetFirst(copyObjectID(spl.GetFirst())) - newSpl.SetParentSignature(copySignature(spl.GetParentSignature())) - newSpl.SetParentHeader(copyHeader(spl.GetParentHeader())) - - if children := spl.GetChildren(); children != nil { - newChildren := make([]refs.ObjectID, len(children)) - copy(newChildren, children) - - newSpl.SetChildren(newChildren) - } else { - newSpl.SetChildren(nil) - } - - newSpl.SetSplitID(bytes.Clone(spl.GetSplitID())) - - return &newSpl -} - -func copyHeader(header *object.Header) *object.Header { - if header == nil { - return nil - } - - var newHeader object.Header - - newHeader.SetCreationEpoch(header.GetCreationEpoch()) - newHeader.SetPayloadLength(header.GetPayloadLength()) - newHeader.SetObjectType(header.GetObjectType()) - - if ver := header.GetVersion(); ver != nil { - newVer := *ver - newHeader.SetVersion(&newVer) - } else { - newHeader.SetVersion(nil) - } - - if containerID := header.GetContainerID(); containerID != nil { - var newContainerID refs.ContainerID - newContainerID.SetValue(bytes.Clone(containerID.GetValue())) - - newHeader.SetContainerID(&newContainerID) - } else { - newHeader.SetContainerID(nil) - } - - if ownerID := header.GetOwnerID(); ownerID != nil { - var newOwnerID refs.OwnerID - newOwnerID.SetValue(bytes.Clone(ownerID.GetValue())) - - newHeader.SetOwnerID(&newOwnerID) - } else { - newHeader.SetOwnerID(nil) - } - - if payloadHash := header.GetPayloadHash(); payloadHash != nil { - var newPayloadHash 
refs.Checksum - newPayloadHash.SetType(payloadHash.GetType()) - newPayloadHash.SetSum(bytes.Clone(payloadHash.GetSum())) - - newHeader.SetPayloadHash(&newPayloadHash) - } else { - newHeader.SetPayloadHash(nil) - } - - if homoHash := header.GetHomomorphicHash(); homoHash != nil { - var newHomoHash refs.Checksum - newHomoHash.SetType(homoHash.GetType()) - newHomoHash.SetSum(bytes.Clone(homoHash.GetSum())) - - newHeader.SetHomomorphicHash(&newHomoHash) - } else { - newHeader.SetHomomorphicHash(nil) - } - - newHeader.SetSessionToken(copySession(header.GetSessionToken())) - - if attrs := header.GetAttributes(); attrs != nil { - newAttributes := make([]object.Attribute, len(attrs)) - copy(newAttributes, attrs) - - newHeader.SetAttributes(newAttributes) - } else { - newHeader.SetAttributes(nil) - } - - newHeader.SetSplit(copySplitHeader(header.GetSplit())) - - return &newHeader -} diff --git a/object/object_internal_test.go b/object/object_internal_test.go deleted file mode 100644 index dc8a695d3..000000000 --- a/object/object_internal_test.go +++ /dev/null @@ -1,356 +0,0 @@ -package object - -import ( - "bytes" - "testing" - - "github.com/google/uuid" - "github.com/nspcc-dev/neofs-api-go/v2/object" - "github.com/nspcc-dev/neofs-api-go/v2/refs" - cid "github.com/nspcc-dev/neofs-sdk-go/container/id" - cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" - "github.com/nspcc-dev/neofs-sdk-go/crypto/test" - oidtest "github.com/nspcc-dev/neofs-sdk-go/object/id/test" - "github.com/nspcc-dev/neofs-sdk-go/session" - "github.com/nspcc-dev/neofs-sdk-go/user" - usertest "github.com/nspcc-dev/neofs-sdk-go/user/test" - "github.com/nspcc-dev/neofs-sdk-go/version" - "github.com/stretchr/testify/require" -) - -func sessionToken(cnr cid.ID) *session.Object { - var sess session.Object - sess.SetID(uuid.New()) - sess.ForVerb(session.VerbObjectPut) - sess.BindContainer(cnr) - - return &sess -} - -func parenObject(cnr cid.ID, owner user.ID) *Object { - var obj Object - - 
obj.InitCreation(RequiredFields{ - Container: cnr, - Owner: owner, - }) - - return &obj -} - -func TestObject_CopyTo(t *testing.T) { - signer := test.RandomSignerRFC6979(t) - - var obj Object - cnr := cidtest.ID() - own := usertest.ID(t) - - obj.InitCreation(RequiredFields{ - Container: cnr, - Owner: own, - }) - - var attr Attribute - attr.SetKey("key") - attr.SetValue("value") - - obj.SetAttributes(attr) - obj.SetPayload([]byte{1, 2, 3}) - obj.SetSessionToken(sessionToken(cnr)) - obj.SetCreationEpoch(10) - obj.SetParent(parenObject(cnr, own)) - obj.SetChildren(oidtest.ID(), oidtest.ID(), oidtest.ID()) - - var splitID SplitID - splitID.SetUUID(uuid.New()) - obj.SetSplitID(&splitID) - - v := version.Current() - obj.SetVersion(&v) - - require.NoError(t, obj.CalculateAndSetID()) - require.NoError(t, obj.Sign(signer)) - - t.Run("copy", func(t *testing.T) { - var dst Object - obj.CopyTo(&dst) - - checkObjectEquals(t, obj, dst) - }) - - t.Run("change id", func(t *testing.T) { - var dst Object - obj.CopyTo(&dst) - - dstHeader := dst.ToV2().GetHeader() - require.NotNil(t, dstHeader) - dstHeader.SetObjectType(object.TypeTombstone) - - objHeader := obj.ToV2().GetHeader() - - require.NotEqual(t, dstHeader.GetObjectType(), objHeader.GetObjectType()) - }) - - t.Run("overwrite id", func(t *testing.T) { - var local Object - _, isSet := local.ID() - require.False(t, isSet) - - var dst Object - require.NoError(t, dst.CalculateAndSetID()) - _, isSet = dst.ID() - require.True(t, isSet) - - local.CopyTo(&dst) - - _, isSet = local.ID() - require.False(t, isSet) - _, isSet = dst.ID() - require.False(t, isSet) - - checkObjectEquals(t, local, dst) - - require.NoError(t, dst.CalculateAndSetID()) - _, isSet = dst.ID() - require.True(t, isSet) - - _, isSet = local.ID() - require.False(t, isSet) - }) - - t.Run("change payload", func(t *testing.T) { - var dst Object - obj.CopyTo(&dst) - - require.True(t, bytes.Equal(dst.Payload(), obj.Payload())) - - p := dst.Payload() - p[0] = 4 - - 
require.False(t, bytes.Equal(dst.Payload(), obj.Payload())) - }) - - t.Run("overwrite signature", func(t *testing.T) { - var local Object - require.Nil(t, local.Signature()) - - var dst Object - require.NoError(t, dst.CalculateAndSetID()) - require.NoError(t, dst.Sign(signer)) - require.NotNil(t, dst.Signature()) - - local.CopyTo(&dst) - require.Nil(t, local.Signature()) - require.Nil(t, dst.Signature()) - - checkObjectEquals(t, local, dst) - - require.NoError(t, dst.CalculateAndSetID()) - require.NoError(t, dst.Sign(signer)) - require.NotNil(t, dst.Signature()) - require.Nil(t, local.Signature()) - }) - - t.Run("overwrite header", func(t *testing.T) { - var local Object - require.Nil(t, local.ToV2().GetHeader()) - - var dst Object - dst.SetAttributes(attr) - require.NotNil(t, dst.ToV2().GetHeader()) - - local.CopyTo(&dst) - checkObjectEquals(t, local, dst) - - require.Nil(t, local.ToV2().GetHeader()) - require.Nil(t, dst.ToV2().GetHeader()) - - dst.SetAttributes(attr) - require.NotNil(t, dst.ToV2().GetHeader()) - require.Nil(t, local.ToV2().GetHeader()) - }) - - t.Run("header, rewrite container id to nil", func(t *testing.T) { - var local Object - var localHeader object.Header - local.ToV2().SetHeader(&localHeader) - - var dstContID refs.ContainerID - dstContID.SetValue([]byte{1}) - - var dstHeader object.Header - dstHeader.SetContainerID(&dstContID) - - var dst Object - dst.ToV2().SetHeader(&dstHeader) - - local.CopyTo(&dst) - checkObjectEquals(t, local, dst) - }) - - t.Run("header, change container id", func(t *testing.T) { - var localContID refs.ContainerID - localContID.SetValue([]byte{1}) - - var localHeader object.Header - localHeader.SetContainerID(&localContID) - - var local Object - local.ToV2().SetHeader(&localHeader) - - var dstContID refs.ContainerID - dstContID.SetValue([]byte{2}) - - var dstHeader object.Header - dstHeader.SetContainerID(&dstContID) - - var dst Object - dst.ToV2().SetHeader(&dstHeader) - - require.NotEqual(t, 
local.ToV2().GetHeader().GetContainerID(), dst.ToV2().GetHeader().GetContainerID()) - - local.CopyTo(&dst) - checkObjectEquals(t, local, dst) - - local.ToV2().GetHeader().GetContainerID().SetValue([]byte{3}) - require.NotEqual(t, local.ToV2().GetHeader().GetContainerID(), dst.ToV2().GetHeader().GetContainerID()) - }) - - t.Run("header, rewrite payload hash", func(t *testing.T) { - var cs refs.Checksum - cs.SetType(refs.TillichZemor) - cs.SetSum([]byte{1}) - - var localHeader object.Header - localHeader.SetPayloadHash(&cs) - - var local Object - local.ToV2().SetHeader(&localHeader) - - var dst Object - local.CopyTo(&dst) - - checkObjectEquals(t, local, dst) - }) - - t.Run("header, rewrite homo hash", func(t *testing.T) { - var cs refs.Checksum - cs.SetType(refs.TillichZemor) - cs.SetSum([]byte{1}) - - var localHeader object.Header - localHeader.SetHomomorphicHash(&cs) - - var local Object - local.ToV2().SetHeader(&localHeader) - - var dst Object - local.CopyTo(&dst) - - checkObjectEquals(t, local, dst) - }) - - t.Run("header, rewrite split header", func(t *testing.T) { - var spl object.SplitHeader - - var localHeader object.Header - localHeader.SetSplit(&spl) - - var local Object - local.ToV2().SetHeader(&localHeader) - - var dst Object - dst.SetChildren(oidtest.ID(), oidtest.ID()) - - local.CopyTo(&dst) - checkObjectEquals(t, local, dst) - }) - - t.Run("header, set session owner", func(t *testing.T) { - var local Object - sess := sessionToken(cnr) - sess.SetIssuer(signer.UserID()) - - local.SetSessionToken(sess) - - var dst Object - - require.NotEqual(t, - local.ToV2().GetHeader().GetSessionToken().GetBody().GetOwnerID(), - dst.ToV2().GetHeader().GetSessionToken().GetBody().GetOwnerID(), - ) - - local.CopyTo(&dst) - checkObjectEquals(t, local, dst) - }) - - t.Run("header, set session owner to nil", func(t *testing.T) { - var local Object - local.SetSessionToken(sessionToken(cnr)) - - sess := sessionToken(cnr) - sess.SetIssuer(signer.UserID()) - - var dst Object - 
dst.SetSessionToken(sess) - - require.NotEqual(t, - local.ToV2().GetHeader().GetSessionToken().GetBody().GetOwnerID(), - dst.ToV2().GetHeader().GetSessionToken().GetBody().GetOwnerID(), - ) - - local.CopyTo(&dst) - checkObjectEquals(t, local, dst) - }) - - t.Run("header, set session lifetime", func(t *testing.T) { - var local Object - sess := sessionToken(cnr) - sess.SetExp(1234) - - local.SetSessionToken(sess) - - var dst Object - - require.NotEqual(t, - local.ToV2().GetHeader().GetSessionToken().GetBody().GetLifetime(), - dst.ToV2().GetHeader().GetSessionToken().GetBody().GetLifetime(), - ) - - local.CopyTo(&dst) - checkObjectEquals(t, local, dst) - }) - - t.Run("header, overwrite session body", func(t *testing.T) { - var local Object - sessLocal := sessionToken(cnr) - local.SetSessionToken(sessLocal) - - local.ToV2().GetHeader().GetSessionToken().SetBody(nil) - - sessDst := sessionToken(cnr) - sessDst.SetID(uuid.New()) - - var dst Object - dst.SetSessionToken(sessDst) - - require.NotEqual(t, - local.ToV2().GetHeader().GetSessionToken().GetBody(), - dst.ToV2().GetHeader().GetSessionToken().GetBody(), - ) - - local.CopyTo(&dst) - checkObjectEquals(t, local, dst) - }) -} - -func checkObjectEquals(t *testing.T, local, dst Object) { - bts, err := local.Marshal() - require.NoError(t, err) - - bts2, err := dst.Marshal() - require.NoError(t, err) - - require.Equal(t, local, dst) - require.True(t, bytes.Equal(bts, bts2)) -} diff --git a/object/object_test.go b/object/object_test.go index 0dea72b94..7a80bbbc8 100644 --- a/object/object_test.go +++ b/object/object_test.go @@ -1,68 +1,5 @@ package object_test -import ( - "strconv" - "testing" - - cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" - "github.com/nspcc-dev/neofs-sdk-go/object" - usertest "github.com/nspcc-dev/neofs-sdk-go/user/test" - "github.com/stretchr/testify/require" -) - -func TestInitCreation(t *testing.T) { - var o object.Object - cnr := cidtest.ID() - own := usertest.ID(t) - - 
o.InitCreation(object.RequiredFields{ - Container: cnr, - Owner: own, - }) - - cID, set := o.ContainerID() - require.True(t, set) - require.Equal(t, cnr, cID) - require.Equal(t, &own, o.OwnerID()) -} - -func TestObject_UserAttributes(t *testing.T) { - var obj object.Object - var attrs []object.Attribute - mSys := make(map[string]string) - mUsr := make(map[string]string) - - for i := 0; i < 10; i++ { - si := strconv.Itoa(i) - - keyUsr := "key" + si - valUsr := "val" + si - keySys := "__NEOFS__" + si - valSys := "sys-val" + si - - mUsr[keyUsr] = valUsr - mSys[keySys] = valSys - - var aUsr object.Attribute - aUsr.SetKey(keyUsr) - aUsr.SetValue(valUsr) - - var aSys object.Attribute - aSys.SetKey(keySys) - aSys.SetValue(valSys) - - attrs = append(attrs, aSys, aUsr) - } - - obj.SetAttributes(attrs...) - - for _, a := range obj.UserAttributes() { - key, val := a.Key(), a.Value() - _, isSys := mSys[key] - require.False(t, isSys, key) - require.Equal(t, mUsr[key], val, key) - delete(mUsr, key) - } - - require.Empty(t, mUsr) -} +// FIXME: negative enum values in gRPC +// FIXME: enum conversions overflow +// TODO diff --git a/object/range.go b/object/range.go index 98eb77102..90f4bbb88 100644 --- a/object/range.go +++ b/object/range.go @@ -1,62 +1,4 @@ package object -import ( - "github.com/nspcc-dev/neofs-api-go/v2/object" -) - -// Range represents v2 [object.Range] object payload range. -type Range object.Range - -// NewRangeFromV2 wraps v2 [object.Range] message to [Range]. -// -// Nil [object.Range] converts to nil. -func NewRangeFromV2(rV2 *object.Range) *Range { - return (*Range)(rV2) -} - -// NewRange creates and initializes blank [Range]. -// -// Defaults: -// - offset: 0; -// - length: 0. -func NewRange() *Range { - return NewRangeFromV2(new(object.Range)) -} - -// ToV2 converts [Range] to v2 [object.Range] message. -// -// Nil [Range] converts to nil. -// -// The value returned shares memory with the structure itself, so changing it can lead to data corruption. 
-// Make a copy if you need to change it. -func (r *Range) ToV2() *object.Range { - return (*object.Range)(r) -} - -// GetLength returns payload range size. -// -// See also [Range.SetLength]. -func (r *Range) GetLength() uint64 { - return (*object.Range)(r).GetLength() -} - -// SetLength sets payload range size. -// -// See also [Range.GetLength]. -func (r *Range) SetLength(v uint64) { - (*object.Range)(r).SetLength(v) -} - -// GetOffset sets payload range offset from start. -// -// See also [Range.SetOffset]. -func (r *Range) GetOffset() uint64 { - return (*object.Range)(r).GetOffset() -} - -// SetOffset gets payload range offset from start. -// -// See also [Range.GetOffset]. -func (r *Range) SetOffset(v uint64) { - (*object.Range)(r).SetOffset(v) -} +// Range describes a continuous range of data. +type Range struct{ Offset, Length uint64 } diff --git a/object/range_test.go b/object/range_test.go index e14d3600d..4886d3219 100644 --- a/object/range_test.go +++ b/object/range_test.go @@ -1,58 +1,3 @@ -package object +package object_test -import ( - "testing" - - "github.com/nspcc-dev/neofs-api-go/v2/object" - "github.com/stretchr/testify/require" -) - -func TestRange_SetOffset(t *testing.T) { - r := NewRange() - - off := uint64(13) - r.SetOffset(off) - - require.Equal(t, off, r.GetOffset()) -} - -func TestRange_SetLength(t *testing.T) { - r := NewRange() - - ln := uint64(7) - r.SetLength(ln) - - require.Equal(t, ln, r.GetLength()) -} - -func TestNewRangeFromV2(t *testing.T) { - t.Run("from nil", func(t *testing.T) { - var x *object.Range - - require.Nil(t, NewRangeFromV2(x)) - }) -} - -func TestRange_ToV2(t *testing.T) { - t.Run("nil", func(t *testing.T) { - var x *Range - - require.Nil(t, x.ToV2()) - }) -} - -func TestNewRange(t *testing.T) { - t.Run("default values", func(t *testing.T) { - r := NewRange() - - // check initial values - require.Zero(t, r.GetLength()) - require.Zero(t, r.GetOffset()) - - // convert to v2 message - rV2 := r.ToV2() - - 
require.Zero(t, rV2.GetLength()) - require.Zero(t, rV2.GetOffset()) - }) -} +// TODO diff --git a/object/search.go b/object/search.go index ca7efa62b..fc092e9ca 100644 --- a/object/search.go +++ b/object/search.go @@ -1,142 +1,81 @@ package object import ( - "crypto/sha256" - "encoding/hex" - "encoding/json" + "errors" + "fmt" "strconv" "strings" + "time" - v2object "github.com/nspcc-dev/neofs-api-go/v2/object" - cid "github.com/nspcc-dev/neofs-sdk-go/container/id" + apiobject "github.com/nspcc-dev/neofs-sdk-go/api/object" oid "github.com/nspcc-dev/neofs-sdk-go/object/id" "github.com/nspcc-dev/neofs-sdk-go/user" - "github.com/nspcc-dev/neofs-sdk-go/version" - "github.com/nspcc-dev/tzhash/tz" ) -// SearchMatchType indicates match operation on specified header. -type SearchMatchType uint32 +// FilterOp defines the matching property. +type FilterOp uint32 +// Supported FilterOp values. const ( - MatchUnknown SearchMatchType = iota - MatchStringEqual - MatchStringNotEqual - MatchNotPresent - MatchCommonPrefix - MatchNumGT - MatchNumGE - MatchNumLT - MatchNumLE + _ FilterOp = iota + FilterOpEQ // String 'equal' + FilterOpNE // String 'not equal' + FilterOpNotPresent // Missing property + FilterOpCommonPrefix // Prefix matches in strings + FilterOpGT // Numeric 'greater than' + FilterOpGE // Numeric 'greater or equal than' + FilterOpLT // Numeric 'less than' + FilterOpLE // Numeric 'less or equal than' ) -// ToV2 converts [SearchMatchType] to v2 [v2object.MatchType] enum value. -func (m SearchMatchType) ToV2() v2object.MatchType { - switch m { - case - MatchStringEqual, - MatchStringNotEqual, - MatchNotPresent, - MatchCommonPrefix, - MatchNumGT, - MatchNumGE, - MatchNumLT, - MatchNumLE: - return v2object.MatchType(m) - default: - return v2object.MatchUnknown - } -} - -// SearchMatchFromV2 converts v2 [v2object.MatchType] to [SearchMatchType] enum value. 
-func SearchMatchFromV2(t v2object.MatchType) SearchMatchType { - switch t { - case - v2object.MatchStringEqual, - v2object.MatchStringNotEqual, - v2object.MatchNotPresent, - v2object.MatchCommonPrefix, - v2object.MatchNumGT, - v2object.MatchNumGE, - v2object.MatchNumLT, - v2object.MatchNumLE: - return SearchMatchType(t) - default: - return MatchUnknown - } -} - -// EncodeToString returns string representation of [SearchMatchType]. -// -// String mapping: -// - [MatchStringEqual]: STRING_EQUAL; -// - [MatchStringNotEqual]: STRING_NOT_EQUAL; -// - [MatchNotPresent]: NOT_PRESENT; -// - [MatchCommonPrefix]: COMMON_PREFIX; -// - [MatchNumGT], default: NUM_GT; -// - [MatchNumGE], default: NUM_GE; -// - [MatchNumLT], default: NUM_LT; -// - [MatchNumLE], default: NUM_LE; -// - [MatchUnknown], default: MATCH_TYPE_UNSPECIFIED. -func (m SearchMatchType) EncodeToString() string { - return m.ToV2().String() -} - // String implements [fmt.Stringer]. // // String is designed to be human-readable, and its format MAY differ between -// SDK versions. String MAY return same result as [EncodeToString]. String MUST NOT -// be used to encode ID into NeoFS protocol string. -func (m SearchMatchType) String() string { - return m.EncodeToString() -} - -// DecodeString parses [SearchMatchType] from a string representation. -// It is a reverse action to EncodeToString(). -// -// Returns true if s was parsed successfully. -func (m *SearchMatchType) DecodeString(s string) bool { - var g v2object.MatchType - - ok := g.FromString(s) - - if ok { - *m = SearchMatchFromV2(g) +// SDK versions. 
+func (x FilterOp) String() string { + switch x { + default: + return fmt.Sprintf("UNKNOWN#%d", x) + case FilterOpEQ: + return "STRING_EQUAL" + case FilterOpNE: + return "STRING_NOT_EQUAL" + case FilterOpNotPresent: + return "NOT_PRESENT" + case FilterOpCommonPrefix: + return "COMMON_PREFIX" + case FilterOpGT: + return "NUMERIC_GT" + case FilterOpGE: + return "NUMERIC_GE" + case FilterOpLT: + return "NUMERIC_LT" + case FilterOpLE: + return "NUMERIC_LE" } - - return ok } -type stringEncoder interface { - EncodeToString() string -} - -// SearchFilter describes a single filter record. +// SearchFilter describes object property filter. type SearchFilter struct { - header string - value stringEncoder - op SearchMatchType + key string + value string + op FilterOp } -type staticStringer string - -// SearchFilters is type to describe a group of filters. -type SearchFilters []SearchFilter +const systemFilterPrefix = "$Object:" -// Various header filters. +// Various filters by object header. const ( - FilterVersion = v2object.FilterHeaderVersion - FilterID = v2object.FilterHeaderObjectID - FilterContainerID = v2object.FilterHeaderContainerID - FilterOwnerID = v2object.FilterHeaderOwnerID - FilterPayloadChecksum = v2object.FilterHeaderPayloadHash - FilterType = v2object.FilterHeaderObjectType - FilterPayloadHomomorphicHash = v2object.FilterHeaderHomomorphicHash - FilterParentID = v2object.FilterHeaderParent - FilterSplitID = v2object.FilterHeaderSplitID - FilterFirstSplitObject = v2object.ReservedFilterPrefix + "split.first" - FilterCreationEpoch = v2object.FilterHeaderCreationEpoch - FilterPayloadSize = v2object.FilterHeaderPayloadLength + FilterID = systemFilterPrefix + "objectID" + FilterOwnerID = systemFilterPrefix + "ownerID" + FilterPayloadChecksum = systemFilterPrefix + "payloadHash" + FilterType = systemFilterPrefix + "objectType" + FilterPayloadHomomorphicHash = systemFilterPrefix + "homomorphicHash" + FilterParentID = systemFilterPrefix + "split.parent" + 
FilterSplitID = systemFilterPrefix + "split.splitID" + FilterFirstSplitObject = systemFilterPrefix + "split.first" + FilterCreationEpoch = systemFilterPrefix + "creationEpoch" + FilterPayloadSize = systemFilterPrefix + "payloadLength" ) // Various filters to match certain object properties. @@ -145,214 +84,152 @@ const ( // with user data that are not system-specific. In addition to such objects, the // system may contain service objects that do not fall under this property // (like split leaves, tombstones, storage groups, etc.). - FilterRoot = v2object.FilterPropertyRoot + FilterRoot = systemFilterPrefix + "ROOT" // FilterPhysical filters indivisible objects that are intended to be stored // on the physical devices of the system. In addition to such objects, the // system may contain so-called "virtual" objects that exist in the system in // disassembled form (like "huge" user object sliced into smaller ones). - FilterPhysical = v2object.FilterPropertyPhy + FilterPhysical = systemFilterPrefix + "PHY" ) -func (s staticStringer) EncodeToString() string { - return string(s) -} - -// Header returns filter header value. -func (f *SearchFilter) Header() string { - return f.header -} - -// Value returns filter value. -func (f *SearchFilter) Value() string { - return f.value.EncodeToString() -} - -// Operation returns filter operation value. -func (f *SearchFilter) Operation() SearchMatchType { - return f.op -} - -// IsNonAttribute checks if SearchFilter is non-attribute: such filter is -// related to the particular property of the object instead of its attribute. -func (f SearchFilter) IsNonAttribute() bool { - return strings.HasPrefix(f.header, v2object.ReservedFilterPrefix) -} - -// NewSearchFilters constructs empty filter group. -func NewSearchFilters() SearchFilters { - return SearchFilters{} -} - -// NewSearchFiltersFromV2 converts slice of [v2object.SearchFilter] to [SearchFilters]. 
-func NewSearchFiltersFromV2(v2 []v2object.SearchFilter) SearchFilters { - filters := make(SearchFilters, 0, len(v2)) - - for i := range v2 { - filters.AddFilter( - v2[i].GetKey(), - v2[i].GetValue(), - SearchMatchFromV2(v2[i].GetMatchType()), - ) - } - - return filters -} - -func (f *SearchFilters) addFilter(op SearchMatchType, key string, val stringEncoder) { - if *f == nil { - *f = make(SearchFilters, 0, 1) +// ReadFromV2 reads SearchFilter from the apiobject.SearchRequest_Body_Filter +// message. Returns an error if the message is malformed according to the NeoFS +// API V2 protocol. The message must not be nil. +// +// ReadFromV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. +// +// See also [SearchFilter.WriteToV2]. +func (f *SearchFilter) ReadFromV2(m *apiobject.SearchRequest_Body_Filter) error { + if m.MatchType < 0 { + return errors.New("negative op") + } else if m.Key == "" { + return errors.New("missing key") } - - *f = append(*f, SearchFilter{ - header: key, - value: val, - op: op, - }) + return nil } -// AddFilter adds a filter to group by simple plain parameters. +// WriteToV2 writes SearchFilter to the apiobject.SearchRequest_Body_Filter +// message of the NeoFS API protocol. // -// If op is numeric (like [MatchNumGT]), value must be a base-10 integer. -func (f *SearchFilters) AddFilter(key, value string, op SearchMatchType) { - f.addFilter(op, key, staticStringer(value)) -} - -// addFlagFilters adds filters that works like flags: they don't need to have -// specific match type or value. They processed by NeoFS nodes by the fact -// of presence in search query. E.g.: FilterRoot, FilterPhysical. -func (f *SearchFilters) addFlagFilter(key string) { - f.addFilter(MatchUnknown, key, staticStringer("")) -} - -// AddObjectVersionFilter adds a filter by version. 
+// WriteToV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. // -// The op must not be numeric (like [MatchNumGT]). -func (f *SearchFilters) AddObjectVersionFilter(op SearchMatchType, v version.Version) { - f.addFilter(op, FilterVersion, staticStringer(version.EncodeToString(v))) +// See also [SearchFilter.ReadFromV2]. +func (f SearchFilter) WriteToV2(m *apiobject.SearchRequest_Body_Filter) { + m.MatchType = apiobject.MatchType(f.op) + m.Key = f.key + m.Value = f.value } -// AddObjectContainerIDFilter adds a filter by container id. -// -// The m must not be numeric (like [MatchNumGT]). -func (f *SearchFilters) AddObjectContainerIDFilter(m SearchMatchType, id cid.ID) { - f.addFilter(m, FilterContainerID, id) +// Key returns key to the object property. +func (f SearchFilter) Key() string { + return f.key } -// AddObjectOwnerIDFilter adds a filter by object owner id. -// -// The m must not be numeric (like [MatchNumGT]). -func (f *SearchFilters) AddObjectOwnerIDFilter(m SearchMatchType, id user.ID) { - f.addFilter(m, FilterOwnerID, id) +// Value returns filtered property value. +func (f SearchFilter) Value() string { + return f.value } -// ToV2 converts [SearchFilters] to [v2object.SearchFilter] slice. -func (f SearchFilters) ToV2() []v2object.SearchFilter { - result := make([]v2object.SearchFilter, len(f)) - - for i := range f { - result[i].SetKey(f[i].header) - result[i].SetValue(f[i].value.EncodeToString()) - result[i].SetMatchType(f[i].op.ToV2()) - } - - return result +// Operation returns operator to match the property. +func (f SearchFilter) Operation() FilterOp { + return f.op } -func (f *SearchFilters) addRootFilter() { - f.addFlagFilter(FilterRoot) +// IsNonAttribute checks if SearchFilter is non-attribute: such filter is +// related to the particular property of the object instead of its attribute. 
+func (f SearchFilter) IsNonAttribute() bool { + return strings.HasPrefix(f.key, systemFilterPrefix) } -// AddRootFilter adds filter by objects that have been created by a user explicitly. -func (f *SearchFilters) AddRootFilter() { - f.addRootFilter() +// NewSearchFilter constructs new object search filter instance. Additional +// helper constructors are also available to ease encoding. +func NewSearchFilter(key string, op FilterOp, value string) SearchFilter { + return SearchFilter{ + key: key, + value: value, + op: op, + } } -func (f *SearchFilters) addPhyFilter() { - f.addFlagFilter(FilterPhysical) +// FilterRootObjects returns search filter selecting only root user objects (see +// [FilterRoot] for details). +func FilterRootObjects() SearchFilter { + return NewSearchFilter(FilterRoot, 0, "") } -// AddPhyFilter adds filter by objects that are physically stored in the system. -func (f *SearchFilters) AddPhyFilter() { - f.addPhyFilter() +// FilterPhysicalObjects returns search filter selecting physically stored +// objects only (see [FilterPhysical] for details). +func FilterPhysicalObjects() SearchFilter { + return NewSearchFilter(FilterPhysical, 0, "") } -// AddParentIDFilter adds filter by parent identifier. -// -// The m must not be numeric (like [MatchNumGT]). -func (f *SearchFilters) AddParentIDFilter(m SearchMatchType, id oid.ID) { - f.addFilter(m, FilterParentID, id) +// FilterOwnerIs returns search filter selecting objects owned by given user +// only. Relates to [Header.OwnerID]. +func FilterOwnerIs(usr user.ID) SearchFilter { + return NewSearchFilter(FilterOwnerID, FilterOpEQ, usr.EncodeToString()) } -// AddObjectIDFilter adds filter by object identifier. -// -// The m must not be numeric (like [MatchNumGT]). -func (f *SearchFilters) AddObjectIDFilter(m SearchMatchType, id oid.ID) { - f.addFilter(m, FilterID, id) +// FilterParentIs returns search filter selecting only child objects for the +// given one. Relates to [Header.ParentID]. 
+func FilterParentIs(id oid.ID) SearchFilter { + return NewSearchFilter(FilterParentID, FilterOpEQ, id.EncodeToString()) } -// AddSplitIDFilter adds filter by split ID. -// -// The m must not be numeric (like [MatchNumGT]). -func (f *SearchFilters) AddSplitIDFilter(m SearchMatchType, id SplitID) { - f.addFilter(m, FilterSplitID, staticStringer(id.String())) +// FilterFirstSplitObjectIs returns search filter selecting split-chain elements +// with the specified first one. Relates to [Header.FirstSplitObject] and +// [SplitInfo.FirstPart]. +func FilterFirstSplitObjectIs(id oid.ID) SearchFilter { + return NewSearchFilter(FilterFirstSplitObject, FilterOpEQ, id.EncodeToString()) } -// AddFirstSplitObjectFilter adds filter by first object ID. -// -// The m must not be numeric (like [MatchNumGT]). -func (f *SearchFilters) AddFirstSplitObjectFilter(m SearchMatchType, id oid.ID) { - f.addFilter(m, FilterFirstSplitObject, staticStringer(id.String())) +// FilterTypeIs returns search filter selecting objects of certain type. Relates +// to [Header.Type]. +func FilterTypeIs(typ Type) SearchFilter { + return NewSearchFilter(FilterType, FilterOpEQ, typ.EncodeToString()) } -// AddTypeFilter adds filter by object type. -// -// The m must not be numeric (like [MatchNumGT]). -func (f *SearchFilters) AddTypeFilter(m SearchMatchType, typ Type) { - f.addFilter(m, FilterType, staticStringer(typ.EncodeToString())) +// FilterByCreationEpoch returns search filter selecting objects by creation +// time in NeoFS epochs. Relates to [Header.CreationEpoch]. Use +// [FilterByCreationTime] to specify Unix time format. +func FilterByCreationEpoch(op FilterOp, val uint64) SearchFilter { + return NewSearchFilter(FilterCreationEpoch, op, strconv.FormatUint(val, 10)) } -// MarshalJSON encodes [SearchFilters] to protobuf JSON format. -// -// See also [SearchFilters.UnmarshalJSON]. 
-func (f *SearchFilters) MarshalJSON() ([]byte, error) { - return json.Marshal(f.ToV2()) +// FilterByPayloadSize returns search filter selecting objects by payload size. +// Relates to [Header.PayloadSize]. +func FilterByPayloadSize(op FilterOp, val uint64) SearchFilter { + return NewSearchFilter(FilterPayloadSize, op, strconv.FormatUint(val, 10)) } -// UnmarshalJSON decodes [SearchFilters] from protobuf JSON format. -// -// See also [SearchFilters.MarshalJSON]. -func (f *SearchFilters) UnmarshalJSON(data []byte) error { - var fsV2 []v2object.SearchFilter - - if err := json.Unmarshal(data, &fsV2); err != nil { - return err - } - - *f = NewSearchFiltersFromV2(fsV2) - - return nil +// FilterByName returns filter selecting objects by their human-readable names +// set as 'Name' attribute (see [SetName]). +func FilterByName(op FilterOp, name string) SearchFilter { + return NewSearchFilter(attributeName, op, name) } -// AddPayloadHashFilter adds filter by payload hash. -// -// The m must not be numeric (like [MatchNumGT]). -func (f *SearchFilters) AddPayloadHashFilter(m SearchMatchType, sum [sha256.Size]byte) { - f.addFilter(m, FilterPayloadChecksum, staticStringer(hex.EncodeToString(sum[:]))) +// FilterByFileName returns filter selecting objects by file names associated +// with them through 'FileName' attribute (see [SetFileName]). +func FilterByFileName(op FilterOp, name string) SearchFilter { + return NewSearchFilter(attributeFileName, op, name) } -// AddHomomorphicHashFilter adds filter by homomorphic hash. -// -// The m must not be numeric (like [MatchNumGT]). -func (f *SearchFilters) AddHomomorphicHashFilter(m SearchMatchType, sum [tz.Size]byte) { - f.addFilter(m, FilterPayloadHomomorphicHash, staticStringer(hex.EncodeToString(sum[:]))) +// FilterByFilePath returns filter selecting objects by filesystem paths +// associated with them through 'FilePath' attribute (see [SetFilePath]). 
+func FilterByFilePath(op FilterOp, name string) SearchFilter { + return NewSearchFilter(attributeFilePath, op, name) } -// AddCreationEpochFilter adds filter by creation epoch. -func (f *SearchFilters) AddCreationEpochFilter(m SearchMatchType, epoch uint64) { - f.addFilter(m, FilterCreationEpoch, staticStringer(strconv.FormatUint(epoch, 10))) +// FilterByCreationTime returns filter selecting objects by their creation time +// in Unix Timestamp format set as 'Timestamp' attribute (see +// [SetCreationTime]). Use [FilterByCreationEpoch] to specify NeoFS time format. +func FilterByCreationTime(op FilterOp, t time.Time) SearchFilter { + return NewSearchFilter(attributeTimestamp, op, strconv.FormatInt(t.Unix(), 10)) } -// AddPayloadSizeFilter adds filter by payload size. -func (f *SearchFilters) AddPayloadSizeFilter(m SearchMatchType, size uint64) { - f.addFilter(m, FilterPayloadSize, staticStringer(strconv.FormatUint(size, 10))) +// FilterByContentType returns filter selecting objects by content type of their +// payload set as 'Content-Type' attribute (see [SetContentType]). 
+func FilterByContentType(op FilterOp, name string) SearchFilter { + return NewSearchFilter(attributeContentType, op, name) } diff --git a/object/search_test.go b/object/search_test.go index 8e62bdefe..4886d3219 100644 --- a/object/search_test.go +++ b/object/search_test.go @@ -1,467 +1,3 @@ package object_test -import ( - "crypto/sha256" - "encoding/hex" - "fmt" - "math/rand" - "strconv" - "testing" - - v2object "github.com/nspcc-dev/neofs-api-go/v2/object" - "github.com/nspcc-dev/neofs-sdk-go/checksum" - cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" - "github.com/nspcc-dev/neofs-sdk-go/object" - oid "github.com/nspcc-dev/neofs-sdk-go/object/id" - oidtest "github.com/nspcc-dev/neofs-sdk-go/object/id/test" - objecttest "github.com/nspcc-dev/neofs-sdk-go/object/test" - usertest "github.com/nspcc-dev/neofs-sdk-go/user/test" - versiontest "github.com/nspcc-dev/neofs-sdk-go/version/test" - "github.com/nspcc-dev/tzhash/tz" - "github.com/stretchr/testify/require" -) - -var eqV2Matches = map[object.SearchMatchType]v2object.MatchType{ - object.MatchUnknown: v2object.MatchUnknown, - object.MatchStringEqual: v2object.MatchStringEqual, - object.MatchStringNotEqual: v2object.MatchStringNotEqual, - object.MatchNotPresent: v2object.MatchNotPresent, - object.MatchCommonPrefix: v2object.MatchCommonPrefix, - object.MatchNumGT: v2object.MatchNumGT, - object.MatchNumGE: v2object.MatchNumGE, - object.MatchNumLT: v2object.MatchNumLT, - object.MatchNumLE: v2object.MatchNumLE, -} - -func TestMatch(t *testing.T) { - t.Run("known matches", func(t *testing.T) { - for matchType, matchTypeV2 := range eqV2Matches { - require.Equal(t, matchTypeV2, matchType.ToV2()) - require.Equal(t, object.SearchMatchFromV2(matchTypeV2), matchType) - } - }) - - t.Run("unknown matches", func(t *testing.T) { - var unknownMatchType object.SearchMatchType - - for matchType := range eqV2Matches { - unknownMatchType += matchType - } - - unknownMatchType++ - - require.Equal(t, unknownMatchType.ToV2(), 
v2object.MatchUnknown) - - var unknownMatchTypeV2 v2object.MatchType - - for _, matchTypeV2 := range eqV2Matches { - unknownMatchTypeV2 += matchTypeV2 - } - - unknownMatchTypeV2++ - - require.Equal(t, object.SearchMatchFromV2(unknownMatchTypeV2), object.MatchUnknown) - }) -} - -func TestFilter(t *testing.T) { - inputs := [][]string{ - {"user-header", "user-value"}, - } - - filters := object.NewSearchFilters() - for i := range inputs { - filters.AddFilter(inputs[i][0], inputs[i][1], object.MatchStringEqual) - } - - require.Len(t, filters, len(inputs)) - for i := range inputs { - require.Equal(t, inputs[i][0], filters[i].Header()) - require.Equal(t, inputs[i][1], filters[i].Value()) - require.Equal(t, object.MatchStringEqual, filters[i].Operation()) - } - - v2 := filters.ToV2() - newFilters := object.NewSearchFiltersFromV2(v2) - require.Equal(t, filters, newFilters) -} - -func TestSearchFilters_AddRootFilter(t *testing.T) { - fs := new(object.SearchFilters) - - fs.AddRootFilter() - - require.Len(t, *fs, 1) - - f := (*fs)[0] - - require.Equal(t, object.MatchUnknown, f.Operation()) - require.Equal(t, object.FilterRoot, f.Header()) - require.Equal(t, "", f.Value()) - - t.Run("v2", func(t *testing.T) { - fsV2 := fs.ToV2() - - require.Len(t, fsV2, 1) - - require.Equal(t, v2object.FilterPropertyRoot, fsV2[0].GetKey()) - require.Equal(t, "", fsV2[0].GetValue()) - require.Equal(t, v2object.MatchUnknown, fsV2[0].GetMatchType()) - }) -} - -func TestSearchFilters_AddPhyFilter(t *testing.T) { - fs := new(object.SearchFilters) - - fs.AddPhyFilter() - - require.Len(t, *fs, 1) - - f := (*fs)[0] - - require.Equal(t, object.MatchUnknown, f.Operation()) - require.Equal(t, object.FilterPhysical, f.Header()) - require.Equal(t, "", f.Value()) - - t.Run("v2", func(t *testing.T) { - fsV2 := fs.ToV2() - - require.Len(t, fsV2, 1) - - require.Equal(t, v2object.FilterPropertyPhy, fsV2[0].GetKey()) - require.Equal(t, "", fsV2[0].GetValue()) - require.Equal(t, v2object.MatchUnknown, 
fsV2[0].GetMatchType()) - }) -} - -func testOID() oid.ID { - cs := [sha256.Size]byte{} - //nolint:staticcheck - rand.Read(cs[:]) - - var id oid.ID - id.SetSHA256(cs) - - return id -} - -func TestSearchFilters_AddParentIDFilter(t *testing.T) { - par := testOID() - - fs := object.SearchFilters{} - fs.AddParentIDFilter(object.MatchStringEqual, par) - - require.Len(t, fs, 1) - - f := fs[0] - - require.Equal(t, object.FilterParentID, f.Header()) - require.Equal(t, par.EncodeToString(), f.Value()) - require.Equal(t, object.MatchStringEqual, f.Operation()) - - t.Run("v2", func(t *testing.T) { - fsV2 := fs.ToV2() - - require.Len(t, fsV2, 1) - - require.Equal(t, v2object.FilterHeaderParent, fsV2[0].GetKey()) - require.Equal(t, par.EncodeToString(), fsV2[0].GetValue()) - require.Equal(t, v2object.MatchStringEqual, fsV2[0].GetMatchType()) - }) -} - -func TestSearchFilters_AddObjectIDFilter(t *testing.T) { - id := testOID() - - fs := new(object.SearchFilters) - fs.AddObjectIDFilter(object.MatchStringEqual, id) - - require.Len(t, *fs, 1) - - f := (*fs)[0] - - require.Equal(t, object.FilterID, f.Header()) - require.Equal(t, id.EncodeToString(), f.Value()) - require.Equal(t, object.MatchStringEqual, f.Operation()) - - t.Run("v2", func(t *testing.T) { - fsV2 := fs.ToV2() - - require.Len(t, fsV2, 1) - - require.Equal(t, v2object.FilterHeaderObjectID, fsV2[0].GetKey()) - require.Equal(t, id.EncodeToString(), fsV2[0].GetValue()) - require.Equal(t, v2object.MatchStringEqual, fsV2[0].GetMatchType()) - }) -} - -func TestSearchFilters_AddSplitIDFilter(t *testing.T) { - id := *object.NewSplitID() - - fs := new(object.SearchFilters) - fs.AddSplitIDFilter(object.MatchStringEqual, id) - - f := (*fs)[0] - - require.Equal(t, object.FilterSplitID, f.Header()) - require.Equal(t, id.String(), f.Value()) - require.Equal(t, object.MatchStringEqual, f.Operation()) - - t.Run("v2", func(t *testing.T) { - fsV2 := fs.ToV2() - - require.Len(t, fsV2, 1) - - require.Equal(t, v2object.FilterHeaderSplitID, 
fsV2[0].GetKey()) - require.Equal(t, id.String(), fsV2[0].GetValue()) - require.Equal(t, v2object.MatchStringEqual, fsV2[0].GetMatchType()) - }) -} - -func TestSearchFilters_AddFirstIDFilter(t *testing.T) { - id := testOID() - - fs := new(object.SearchFilters) - fs.AddFirstSplitObjectFilter(object.MatchStringEqual, id) - - f := (*fs)[0] - - require.Equal(t, object.FilterFirstSplitObject, f.Header()) - require.Equal(t, id.String(), f.Value()) - require.Equal(t, object.MatchStringEqual, f.Operation()) - - t.Run("v2", func(t *testing.T) { - fsV2 := fs.ToV2() - - require.Len(t, fsV2, 1) - - require.Equal(t, v2object.ReservedFilterPrefix+"split.first", fsV2[0].GetKey()) - require.Equal(t, id.String(), fsV2[0].GetValue()) - require.Equal(t, v2object.MatchStringEqual, fsV2[0].GetMatchType()) - }) -} - -func TestSearchFilters_AddTypeFilter(t *testing.T) { - typ := object.TypeTombstone - - fs := new(object.SearchFilters) - fs.AddTypeFilter(object.MatchStringEqual, typ) - - f := (*fs)[0] - - require.Equal(t, object.FilterType, f.Header()) - require.Equal(t, typ.String(), f.Value()) - require.Equal(t, object.MatchStringEqual, f.Operation()) - - t.Run("v2", func(t *testing.T) { - fsV2 := fs.ToV2() - - require.Len(t, fsV2, 1) - - require.Equal(t, v2object.FilterHeaderObjectType, fsV2[0].GetKey()) - require.Equal(t, typ.EncodeToString(), fsV2[0].GetValue()) - require.Equal(t, v2object.MatchStringEqual, fsV2[0].GetMatchType()) - }) -} - -func TestSearchFiltersEncoding(t *testing.T) { - fs := object.NewSearchFilters() - fs.AddFilter("key 1", "value 2", object.MatchStringEqual) - fs.AddFilter("key 2", "value 2", object.MatchStringNotEqual) - fs.AddFilter("key 2", "value 2", object.MatchCommonPrefix) - - t.Run("json", func(t *testing.T) { - data, err := fs.MarshalJSON() - require.NoError(t, err) - - fs2 := object.NewSearchFilters() - require.NoError(t, fs2.UnmarshalJSON(data)) - - require.Equal(t, fs, fs2) - }) -} - -func TestSearchMatchType_String(t *testing.T) { - toPtr := func(v 
object.SearchMatchType) *object.SearchMatchType { - return &v - } - - testEnumStrings(t, new(object.SearchMatchType), []enumStringItem{ - {val: toPtr(object.MatchCommonPrefix), str: "COMMON_PREFIX"}, - {val: toPtr(object.MatchStringEqual), str: "STRING_EQUAL"}, - {val: toPtr(object.MatchStringNotEqual), str: "STRING_NOT_EQUAL"}, - {val: toPtr(object.MatchNotPresent), str: "NOT_PRESENT"}, - {val: toPtr(object.MatchUnknown), str: "MATCH_TYPE_UNSPECIFIED"}, - {val: toPtr(object.MatchNumGT), str: "NUM_GT"}, - {val: toPtr(object.MatchNumGE), str: "NUM_GE"}, - {val: toPtr(object.MatchNumLT), str: "NUM_LT"}, - {val: toPtr(object.MatchNumLE), str: "NUM_LE"}, - }) -} - -func testChecksumSha256() [sha256.Size]byte { - cs := [sha256.Size]byte{} - //nolint:staticcheck - rand.Read(cs[:]) - - return cs -} - -func testChecksumTZ() [tz.Size]byte { - cs := [tz.Size]byte{} - //nolint:staticcheck - rand.Read(cs[:]) - - return cs -} - -func TestSearchFilters_AddPayloadHashFilter(t *testing.T) { - cs := testChecksumSha256() - - fs := new(object.SearchFilters) - fs.AddPayloadHashFilter(object.MatchStringEqual, cs) - - f := (*fs)[0] - - require.Equal(t, object.FilterPayloadChecksum, f.Header()) - require.Equal(t, hex.EncodeToString(cs[:]), f.Value()) - require.Equal(t, object.MatchStringEqual, f.Operation()) - - t.Run("v2", func(t *testing.T) { - fsV2 := fs.ToV2() - - require.Len(t, fsV2, 1) - - require.Equal(t, v2object.FilterHeaderPayloadHash, fsV2[0].GetKey()) - require.Equal(t, hex.EncodeToString(cs[:]), fsV2[0].GetValue()) - require.Equal(t, v2object.MatchStringEqual, fsV2[0].GetMatchType()) - }) -} - -func ExampleSearchFilters_AddPayloadHashFilter() { - hash, _ := hex.DecodeString("66842cfea090b1d906b52400fae49d86df078c0670f2bdd059ba289ebe24a498") - - var v [sha256.Size]byte - copy(v[:], hash[:sha256.Size]) - - var cs checksum.Checksum - cs.SetSHA256(v) - - fmt.Println(hex.EncodeToString(cs.Value())) - // Output: 66842cfea090b1d906b52400fae49d86df078c0670f2bdd059ba289ebe24a498 -} - 
-func TestSearchFilters_AddHomomorphicHashFilter(t *testing.T) { - cs := testChecksumTZ() - - fs := new(object.SearchFilters) - fs.AddHomomorphicHashFilter(object.MatchStringEqual, cs) - - f := (*fs)[0] - - require.Equal(t, object.FilterPayloadHomomorphicHash, f.Header()) - require.Equal(t, hex.EncodeToString(cs[:]), f.Value()) - require.Equal(t, object.MatchStringEqual, f.Operation()) - - t.Run("v2", func(t *testing.T) { - fsV2 := fs.ToV2() - - require.Len(t, fsV2, 1) - - require.Equal(t, v2object.FilterHeaderHomomorphicHash, fsV2[0].GetKey()) - require.Equal(t, hex.EncodeToString(cs[:]), fsV2[0].GetValue()) - require.Equal(t, v2object.MatchStringEqual, fsV2[0].GetMatchType()) - }) -} - -func ExampleSearchFilters_AddHomomorphicHashFilter() { - hash, _ := hex.DecodeString("7e302ebb3937e810feb501965580c746048db99cebd095c3ce27022407408bf904dde8d9aa8085d2cf7202345341cc947fa9d722c6b6699760d307f653815d0c") - - var v [tz.Size]byte - copy(v[:], hash[:tz.Size]) - - var cs checksum.Checksum - cs.SetTillichZemor(v) - - fmt.Println(hex.EncodeToString(cs.Value())) - // Output: 7e302ebb3937e810feb501965580c746048db99cebd095c3ce27022407408bf904dde8d9aa8085d2cf7202345341cc947fa9d722c6b6699760d307f653815d0c -} - -func TestSearchFilters_AddCreationEpochFilter(t *testing.T) { - epoch := rand.Uint64() - - fs := new(object.SearchFilters) - fs.AddCreationEpochFilter(object.MatchStringEqual, epoch) - - require.Len(t, *fs, 1) - - f := (*fs)[0] - - require.Equal(t, object.FilterCreationEpoch, f.Header()) - require.Equal(t, strconv.FormatUint(epoch, 10), f.Value()) - require.Equal(t, object.MatchStringEqual, f.Operation()) - - t.Run("v2", func(t *testing.T) { - fsV2 := fs.ToV2() - - require.Len(t, fsV2, 1) - - require.Equal(t, v2object.FilterHeaderCreationEpoch, fsV2[0].GetKey()) - require.Equal(t, strconv.FormatUint(epoch, 10), fsV2[0].GetValue()) - require.Equal(t, v2object.MatchStringEqual, fsV2[0].GetMatchType()) - }) -} - -func TestSearchFilters_AddPayloadSizeFilter(t *testing.T) { - 
size := rand.Uint64() - - fs := new(object.SearchFilters) - fs.AddPayloadSizeFilter(object.MatchStringEqual, size) - - require.Len(t, *fs, 1) - - f := (*fs)[0] - - require.Equal(t, object.FilterPayloadSize, f.Header()) - require.Equal(t, strconv.FormatUint(size, 10), f.Value()) - require.Equal(t, object.MatchStringEqual, f.Operation()) - - t.Run("v2", func(t *testing.T) { - fsV2 := fs.ToV2() - - require.Len(t, fsV2, 1) - - require.Equal(t, v2object.FilterHeaderPayloadLength, fsV2[0].GetKey()) - require.Equal(t, strconv.FormatUint(size, 10), fsV2[0].GetValue()) - require.Equal(t, v2object.MatchStringEqual, fsV2[0].GetMatchType()) - }) -} - -func TestSearchFilters_HasNonAttributeFilter(t *testing.T) { - const anyMatcher = object.MatchStringEqual - var fs object.SearchFilters - - fs.AddFilter("key", "value", anyMatcher) - require.False(t, fs[0].IsNonAttribute()) - - for _, f := range []func(){ - func() { fs.AddFilter("$Object:any", "", anyMatcher) }, - func() { fs.AddObjectVersionFilter(anyMatcher, versiontest.Version()) }, - func() { fs.AddParentIDFilter(anyMatcher, oidtest.ID()) }, - func() { fs.AddObjectContainerIDFilter(anyMatcher, cidtest.ID()) }, - func() { fs.AddObjectOwnerIDFilter(anyMatcher, usertest.ID(t)) }, - func() { fs.AddCreationEpochFilter(anyMatcher, rand.Uint64()) }, - func() { fs.AddPayloadSizeFilter(anyMatcher, rand.Uint64()) }, - func() { fs.AddPayloadHashFilter(anyMatcher, [sha256.Size]byte{1}) }, - func() { fs.AddTypeFilter(anyMatcher, object.TypeTombstone) }, - func() { fs.AddHomomorphicHashFilter(anyMatcher, [tz.Size]byte{1}) }, - func() { fs.AddParentIDFilter(anyMatcher, oidtest.ID()) }, - func() { fs.AddSplitIDFilter(anyMatcher, objecttest.SplitID()) }, - func() { fs.AddRootFilter() }, - func() { fs.AddPhyFilter() }, - } { - fs = fs[:0] - f() - require.True(t, fs[0].IsNonAttribute()) - } -} +// TODO diff --git a/object/slicer/slicer.go b/object/slicer/slicer.go index d350a0ed3..1e5843dde 100644 --- a/object/slicer/slicer.go +++ 
b/object/slicer/slicer.go @@ -21,11 +21,6 @@ import ( "github.com/nspcc-dev/tzhash/tz" ) -var ( - // ErrIncompleteHeader indicates some fields are missing in header. - ErrIncompleteHeader = errors.New("incomplete header") -) - // ObjectWriter represents a virtual object recorder. type ObjectWriter interface { // ObjectPutInit initializes and returns a stream of writable data associated diff --git a/object/split.go b/object/split.go new file mode 100644 index 000000000..bdd820239 --- /dev/null +++ b/object/split.go @@ -0,0 +1,317 @@ +package object + +import ( + "errors" + "fmt" + + "github.com/nspcc-dev/neofs-sdk-go/api/link" + "github.com/nspcc-dev/neofs-sdk-go/api/object" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + oid "github.com/nspcc-dev/neofs-sdk-go/object/id" + "google.golang.org/protobuf/proto" +) + +// SplitChainElement describes an object in the chain of dividing a NeoFS object +// into several parts. +type SplitChainElement struct { + id oid.ID + sz uint32 +} + +// ID returns element ID. +// +// See also [SplitChainElement.SetID], [Object.ID]. +func (x SplitChainElement) ID() oid.ID { + return x.id +} + +// SetID sets element ID. +// +// See also [SplitChainElement.ID]. +func (x *SplitChainElement) SetID(id oid.ID) { + x.id = id +} + +// PayloadSize returns size of the element payload. +// +// See also [SplitChainElement.SetPayloadSize], [Header.PayloadSize]. +func (x SplitChainElement) PayloadSize() uint32 { + return x.sz +} + +// SetPayloadSize sets size of the element payload. +// +// See also [SplitChainElement.PayloadSize]. +func (x *SplitChainElement) SetPayloadSize(sz uint32) { + x.sz = sz +} + +// SplitChain describes split-chain of a NeoFS object divided into several +// parts. SplitChain is stored and transmitted as payload of system NeoFS +// objects. +type SplitChain struct { + els []SplitChainElement +} + +// readFromV2 reads SplitChain from the link.Link message. 
Returns an error if +// the message is malformed according to the NeoFS API V2 protocol. The message +// must not be nil. +// +// ReadFromV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. +// +// See also writeToV2. +func (x *SplitChain) readFromV2(m *link.Link) error { + if len(m.Children) == 0 { + return fmt.Errorf("missing elements") + } + + x.els = make([]SplitChainElement, len(m.Children)) + for i := range m.Children { + if m.Children[i] == nil { + return fmt.Errorf("element #%d is nil", i) + } + if m.Children[i].Id == nil { + return fmt.Errorf("invalid element #%d: missing ID", i) + } + err := x.els[i].id.ReadFromV2(m.Children[i].Id) + if err != nil { + return fmt.Errorf("invalid element #%d: invalid ID: %w", i, err) + } + x.els[i].sz = m.Children[i].Size + } + + return nil +} + +// writeToV2 writes SplitChain to the link.Link message of the NeoFS API +// protocol. +// +// WriteToV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. +// +// See also readFromV2. +func (x SplitChain) writeToV2(m *link.Link) { + if x.els != nil { + m.Children = make([]*link.Link_MeasuredObject, len(x.els)) + for i := range x.els { + m.Children[i] = &link.Link_MeasuredObject{ + Id: new(refs.ObjectID), + Size: x.els[i].sz, + } + x.els[i].id.WriteToV2(m.Children[i].Id) + } + } else { + m.Children = nil + } +} + +// Marshal encodes SplitChain into a Protocol Buffers V3 binary format. +// +// See also [SplitChain.Unmarshal]. +func (x SplitChain) Marshal() []byte { + var m link.Link + x.writeToV2(&m) + + b, err := proto.Marshal(&m) + if err != nil { + // while it is bad to panic on external package return, we can do nothing better + // for this case: how can a normal message not be encoded? 
+ panic(fmt.Errorf("unexpected marshal protobuf message failure: %w", err)) + } + return b +} + +// Unmarshal decodes Protocol Buffers V3 binary data into the SplitChain. +// Returns an error if the message is malformed according to the NeoFS API V2 +// protocol. +// +// See also [SplitChain.Marshal]. +func (x *SplitChain) Unmarshal(data []byte) error { + var m link.Link + err := proto.Unmarshal(data, &m) + if err != nil { + return fmt.Errorf("decode protobuf: %w", err) + } + return x.readFromV2(&m) +} + +// Elements returns sorted list with split-chain elements. +func (x SplitChain) Elements() []SplitChainElement { + return x.els +} + +// SetElements sets sorted list of split-chain elements. +func (x *SplitChain) SetElements(els []SplitChainElement) { + x.els = els +} + +// SplitInfo represents a collection of references related to particular +// [SplitChain]. +type SplitInfo struct { + first, last, link oid.ID + // deprecated + splitID []byte + + firstSet, lastSet, linkSet bool +} + +// ReadFromV2 reads SplitInfo from the [object.SplitInfo] message. Returns an +// error if the message is malformed according to the NeoFS API V2 protocol. The +// message must not be nil. +// +// ReadFromV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. +// +// See also [SplitInfo.WriteToV2]. 
+func (s *SplitInfo) ReadFromV2(m *object.SplitInfo) error { + if ln := len(m.SplitId); ln > 0 && ln != 16 { + return fmt.Errorf("invalid split ID length %d", ln) + } + + s.lastSet = m.LastPart != nil + s.linkSet = m.Link != nil + if !s.lastSet && !s.linkSet { + return errors.New("both linking and last split-chain elements are missing") + } + + if s.lastSet { + err := s.last.ReadFromV2(m.LastPart) + if err != nil { + return fmt.Errorf("invalid last split-chain element: %w", err) + } + } + if s.linkSet { + err := s.link.ReadFromV2(m.Link) + if err != nil { + return fmt.Errorf("invalid linking split-chain element: %w", err) + } + } + if s.firstSet = m.FirstPart != nil; s.firstSet { + err := s.first.ReadFromV2(m.FirstPart) + if err != nil { + return fmt.Errorf("invalid first split-chain element: %w", err) + } + } + + s.splitID = m.SplitId + + return nil +} + +// WriteToV2 writes SplitInfo to the [object.SplitInfo] message of the NeoFS API +// protocol. +// +// WriteToV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. +// +// See also [SplitInfo.ReadFromV2]. +func (s SplitInfo) WriteToV2(m *object.SplitInfo) { + if s.lastSet { + m.LastPart = new(refs.ObjectID) + s.last.WriteToV2(m.LastPart) + } + if s.linkSet { + m.Link = new(refs.ObjectID) + s.link.WriteToV2(m.Link) + } + if s.firstSet { + m.FirstPart = new(refs.ObjectID) + s.first.WriteToV2(m.FirstPart) + } + m.SplitId = s.splitID +} + +// LastPart returns identifier of the last split-chain element. Zero return +// indicates unset relation. +// +// See also [SplitInfo.SetLastPart], [SplitChain.Elements]. +func (s SplitInfo) LastPart() oid.ID { + if s.lastSet { + return s.last + } + return oid.ID{} +} + +// SetLastPart sets identifier of the last split-chain element. +// +// See also [SplitInfo.LastPart]. 
+func (s *SplitInfo) SetLastPart(v oid.ID) { + s.last, s.lastSet = v, true +} + +// Linker returns identifier of the object carrying full [SplitChain] in its +// payload. Zero return indicates unset relation. +// +// See also [SplitInfo.SetLinker]. +func (s SplitInfo) Linker() oid.ID { + if s.linkSet { + return s.link + } + return oid.ID{} +} + +// SetLinker sets identifier of the object carrying full information about the +// split-chain in its payload. +// +// See also [SplitInfo.Linker], [SplitChain]. +func (s *SplitInfo) SetLinker(v oid.ID) { + s.link, s.linkSet = v, true +} + +// FirstPart returns identifier of the first split-chain element. Zero return +// indicates unset relation. +// +// See also [SplitInfo.SetFirstPart], [Header.FirstSplitObject], +// [SplitChain.Elements]. +func (s SplitInfo) FirstPart() oid.ID { + if s.firstSet { + return s.first + } + return oid.ID{} +} + +// SetFirstPart sets identifier of the first split-chain element. +// +// See also [SplitInfo.FirstPart]. +func (s *SplitInfo) SetFirstPart(v oid.ID) { + s.first, s.firstSet = v, true +} + +// Marshal encodes SplitInfo into a Protocol Buffers V3 binary format. +// +// See also [SplitInfo.Unmarshal]. +func (s SplitInfo) Marshal() []byte { + var m object.SplitInfo + s.WriteToV2(&m) + + b, err := proto.Marshal(&m) + if err != nil { + // while it is bad to panic on external package return, we can do nothing better + // for this case: how can a normal message not be encoded? + panic(fmt.Errorf("unexpected marshal protobuf message failure: %w", err)) + } + return b +} + +// Unmarshal decodes Protocol Buffers V3 binary data into the SplitInfo. Returns +// an error if the message is malformed according to the NeoFS API V2 protocol. +// +// See also [SplitInfo.Marshal]. 
+func (s *SplitInfo) Unmarshal(data []byte) error { + var m object.SplitInfo + err := proto.Unmarshal(data, &m) + if err != nil { + return fmt.Errorf("decode protobuf: %w", err) + } + return s.ReadFromV2(&m) +} + +// SplitInfoError is an error wrapping SplitInfo which is returned to indicate +// split object: an object presented as several smaller objects. +type SplitInfoError SplitInfo + +// Error implements built-in error interface. +func (s SplitInfoError) Error() string { + return "object is split" +} diff --git a/object/split_test.go b/object/split_test.go new file mode 100644 index 000000000..4886d3219 --- /dev/null +++ b/object/split_test.go @@ -0,0 +1,3 @@ +package object_test + +// TODO diff --git a/object/splitid.go b/object/splitid.go deleted file mode 100644 index 8d890f2ff..000000000 --- a/object/splitid.go +++ /dev/null @@ -1,80 +0,0 @@ -package object - -import ( - "github.com/google/uuid" -) - -// SplitID is a UUIDv4 used as attribute in split objects. -type SplitID struct { - uuid uuid.UUID -} - -// NewSplitID returns UUID representation of splitID attribute. -// -// Defaults: -// - id: random UUID. -func NewSplitID() *SplitID { - return &SplitID{ - uuid: uuid.New(), - } -} - -// NewSplitIDFromV2 returns parsed UUID from bytes. -// If v is invalid UUIDv4 byte sequence, then function returns nil. -// -// Nil converts to nil. -func NewSplitIDFromV2(v []byte) *SplitID { - if v == nil { - return nil - } - - id := uuid.New() - - err := id.UnmarshalBinary(v) - if err != nil { - return nil - } - - return &SplitID{ - uuid: id, - } -} - -// Parse converts UUIDv4 string representation into [SplitID]. -func (id *SplitID) Parse(s string) (err error) { - id.uuid, err = uuid.Parse(s) - if err != nil { - return err - } - - return nil -} - -// String returns UUIDv4 string representation of [SplitID]. -func (id *SplitID) String() string { - if id == nil { - return "" - } - - return id.uuid.String() -} - -// SetUUID sets pre created UUID structure as [SplitID]. 
-func (id *SplitID) SetUUID(v uuid.UUID) { - if id != nil { - id.uuid = v - } -} - -// ToV2 converts [SplitID] to a representation of SplitID in neofs-api v2. -// -// Nil SplitID converts to nil. -func (id *SplitID) ToV2() []byte { - if id == nil { - return nil - } - - data, _ := id.uuid.MarshalBinary() // err is always nil - - return data -} diff --git a/object/splitid_test.go b/object/splitid_test.go deleted file mode 100644 index 6a15ac31e..000000000 --- a/object/splitid_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package object_test - -import ( - "testing" - - "github.com/google/uuid" - "github.com/nspcc-dev/neofs-sdk-go/object" - "github.com/stretchr/testify/require" -) - -func TestSplitID(t *testing.T) { - id := object.NewSplitID() - - t.Run("toV2/fromV2", func(t *testing.T) { - data := id.ToV2() - - newID := object.NewSplitIDFromV2(data) - require.NotNil(t, newID) - - require.Equal(t, id, newID) - }) - - t.Run("string/parse", func(t *testing.T) { - idStr := id.String() - - newID := object.NewSplitID() - require.NoError(t, newID.Parse(idStr)) - - require.Equal(t, id, newID) - }) - - t.Run("set UUID", func(t *testing.T) { - newUUID := uuid.New() - id.SetUUID(newUUID) - - require.Equal(t, newUUID.String(), id.String()) - }) - - t.Run("nil value", func(t *testing.T) { - var newID *object.SplitID - - require.NotPanics(t, func() { - require.Nil(t, newID.ToV2()) - require.Equal(t, "", newID.String()) - }) - }) -} - -func TestSplitID_ToV2(t *testing.T) { - t.Run("nil", func(t *testing.T) { - var x *object.SplitID - - require.Nil(t, x.ToV2()) - }) -} - -func TestNewIDFromV2(t *testing.T) { - t.Run("from nil", func(t *testing.T) { - var x []byte - - require.Nil(t, object.NewSplitIDFromV2(x)) - }) -} diff --git a/object/splitinfo.go b/object/splitinfo.go deleted file mode 100644 index 8b6a5c101..000000000 --- a/object/splitinfo.go +++ /dev/null @@ -1,213 +0,0 @@ -package object - -import ( - "errors" - "fmt" - - "github.com/nspcc-dev/neofs-api-go/v2/object" - 
"github.com/nspcc-dev/neofs-api-go/v2/refs" - oid "github.com/nspcc-dev/neofs-sdk-go/object/id" -) - -// SplitInfo is an SDK representation of [object.SplitInfo]. -type SplitInfo object.SplitInfo - -// NewSplitInfoFromV2 wraps v2 [object.SplitInfo] message to [SplitInfo]. -// -// Nil object.SplitInfo converts to nil. -func NewSplitInfoFromV2(v2 *object.SplitInfo) *SplitInfo { - return (*SplitInfo)(v2) -} - -// NewSplitInfo creates and initializes blank [SplitInfo]. -// -// Defaults: -// - splitID: nil; -// - lastPart nil; -// - link: nil. -func NewSplitInfo() *SplitInfo { - return NewSplitInfoFromV2(new(object.SplitInfo)) -} - -// ToV2 converts [SplitInfo] to v2 [object.SplitInfo] message. -// -// Nil SplitInfo converts to nil. -// -// The value returned shares memory with the structure itself, so changing it can lead to data corruption. -// Make a copy if you need to change it. -func (s *SplitInfo) ToV2() *object.SplitInfo { - return (*object.SplitInfo)(s) -} - -// SplitID returns [SplitID] if it has been set. New objects may miss it, -// use [SplitInfo.FirstPart] as a split chain identifier. -// -// The value returned shares memory with the structure itself, so changing it can lead to data corruption. -// Make a copy if you need to change it. -// -// See also [SplitInfo.SetSplitID]. -func (s *SplitInfo) SplitID() *SplitID { - return NewSplitIDFromV2( - (*object.SplitInfo)(s).GetSplitID()) -} - -// SetSplitID sets split ID in object ID. It resets split ID if nil passed. -// -// See also [SplitInfo.SplitID]. -// -// DEPRECATED.[SplitInfo.SetFirstPart] usage is required for the _new_ split -// objects, it serves as chain identification. -func (s *SplitInfo) SetSplitID(v *SplitID) { - (*object.SplitInfo)(s).SetSplitID(v.ToV2()) -} - -// LastPart returns last object ID, can be used to retrieve original object. -// The second return value is a flag, indicating if the last part is present. -// -// See also [SplitInfo.SetLastPart]. 
-func (s SplitInfo) LastPart() (v oid.ID, isSet bool) { - v2 := (object.SplitInfo)(s) - - lpV2 := v2.GetLastPart() - if lpV2 != nil { - _ = v.ReadFromV2(*lpV2) - isSet = true - } - - return -} - -// SetLastPart sets the last object ID. -// -// See also [SplitInfo.LastPart]. -func (s *SplitInfo) SetLastPart(v oid.ID) { - var idV2 refs.ObjectID - v.WriteToV2(&idV2) - - (*object.SplitInfo)(s).SetLastPart(&idV2) -} - -// Link returns a linker object ID. -// The second return value is a flag, indicating if the last part is present. -// -// See also [SplitInfo.SetLink]. -func (s SplitInfo) Link() (v oid.ID, isSet bool) { - v2 := (object.SplitInfo)(s) - - linkV2 := v2.GetLink() - if linkV2 != nil { - _ = v.ReadFromV2(*linkV2) - isSet = true - } - - return -} - -// SetLink sets linker object ID. -// -// See also [SplitInfo.Link]. -func (s *SplitInfo) SetLink(v oid.ID) { - var idV2 refs.ObjectID - v.WriteToV2(&idV2) - - (*object.SplitInfo)(s).SetLink(&idV2) -} - -// FirstPart returns the first part of the split chain. -// -// See also [SplitInfo.SetFirstPart]. -func (s SplitInfo) FirstPart() (v oid.ID, isSet bool) { - v2 := (object.SplitInfo)(s) - - firstV2 := v2.GetFirstPart() - if firstV2 != nil { - _ = v.ReadFromV2(*firstV2) - isSet = true - } - - return -} - -// SetFirstPart sets the first part of the split chain. -// -// See also [SplitInfo.FirstPart]. -func (s *SplitInfo) SetFirstPart(v oid.ID) { - var idV2 refs.ObjectID - v.WriteToV2(&idV2) - - (*object.SplitInfo)(s).SetFirstPart(&idV2) -} - -// Marshal marshals [SplitInfo] into a protobuf binary form. -// -// See also [SplitInfo.Unmarshal]. -func (s *SplitInfo) Marshal() ([]byte, error) { - return (*object.SplitInfo)(s).StableMarshal(nil), nil -} - -// Unmarshal unmarshals protobuf binary representation of [SplitInfo]. -// -// See also [SplitInfo.Marshal]. 
-func (s *SplitInfo) Unmarshal(data []byte) error { - err := (*object.SplitInfo)(s).Unmarshal(data) - if err != nil { - return err - } - - return formatCheckSI((*object.SplitInfo)(s)) -} - -// MarshalJSON implements json.Marshaler. -// -// See also [SplitInfo.UnmarshalJSON]. -func (s *SplitInfo) MarshalJSON() ([]byte, error) { - return (*object.SplitInfo)(s).MarshalJSON() -} - -// UnmarshalJSON implements json.Unmarshaler. -// -// See also [SplitInfo.MarshalJSON]. -func (s *SplitInfo) UnmarshalJSON(data []byte) error { - err := (*object.SplitInfo)(s).UnmarshalJSON(data) - if err != nil { - return err - } - - return formatCheckSI((*object.SplitInfo)(s)) -} - -var errSplitInfoMissingFields = errors.New("neither link object ID nor last part object ID is set") - -func formatCheckSI(v2 *object.SplitInfo) error { - link := v2.GetLink() - lastPart := v2.GetLastPart() - if link == nil && lastPart == nil { - return errSplitInfoMissingFields - } - - var oID oid.ID - - if link != nil { - err := oID.ReadFromV2(*link) - if err != nil { - return fmt.Errorf("could not convert link object ID: %w", err) - } - } - - if lastPart != nil { - err := oID.ReadFromV2(*lastPart) - if err != nil { - return fmt.Errorf("could not convert last part object ID: %w", err) - } - } - - firstPart := v2.GetFirstPart() - if firstPart != nil { // can be missing for old objects - err := oID.ReadFromV2(*firstPart) - if err != nil { - return fmt.Errorf("could not convert first part object ID: %w", err) - } - } - - return nil -} diff --git a/object/splitinfo_test.go b/object/splitinfo_test.go deleted file mode 100644 index 5a026899c..000000000 --- a/object/splitinfo_test.go +++ /dev/null @@ -1,170 +0,0 @@ -package object_test - -import ( - "crypto/rand" - "encoding/json" - "testing" - - objv2 "github.com/nspcc-dev/neofs-api-go/v2/object" - "github.com/nspcc-dev/neofs-sdk-go/object" - oid "github.com/nspcc-dev/neofs-sdk-go/object/id" - "github.com/stretchr/testify/require" -) - -func TestSplitInfo(t 
*testing.T) { - s := object.NewSplitInfo() - splitID := object.NewSplitID() - lastPart := generateID() - link := generateID() - firstPart := generateID() - - s.SetSplitID(splitID) - require.Equal(t, splitID, s.SplitID()) - - s.SetLastPart(lastPart) - lp, set := s.LastPart() - require.True(t, set) - require.Equal(t, lastPart, lp) - - s.SetLink(link) - l, set := s.Link() - require.True(t, set) - require.Equal(t, link, l) - - s.SetFirstPart(firstPart) - ip, set := s.FirstPart() - require.True(t, set) - require.Equal(t, firstPart, ip) -} - -func TestSplitInfoMarshal(t *testing.T) { - testToV2 := func(t *testing.T, s *object.SplitInfo) { - v2 := s.ToV2() - newS := object.NewSplitInfoFromV2(v2) - - require.Equal(t, s, newS) - } - testMarshal := func(t *testing.T, s *object.SplitInfo) { - data, err := s.Marshal() - require.NoError(t, err) - - newS := object.NewSplitInfo() - - err = newS.Unmarshal(data) - require.NoError(t, err) - require.Equal(t, s, newS) - } - - t.Run("good, all fields are set", func(t *testing.T) { - s := object.NewSplitInfo() - s.SetSplitID(object.NewSplitID()) - s.SetLink(generateID()) - s.SetLastPart(generateID()) - s.SetFirstPart(generateID()) - - testToV2(t, s) - testMarshal(t, s) - }) - t.Run("good, only link is set", func(t *testing.T) { - s := object.NewSplitInfo() - s.SetSplitID(object.NewSplitID()) - s.SetLink(generateID()) - - testToV2(t, s) - testMarshal(t, s) - }) - t.Run("good, only last part is set", func(t *testing.T) { - s := object.NewSplitInfo() - s.SetSplitID(object.NewSplitID()) - s.SetLastPart(generateID()) - - testToV2(t, s) - testMarshal(t, s) - }) - t.Run("bad, no fields are set", func(t *testing.T) { - s := object.NewSplitInfo() - s.SetSplitID(object.NewSplitID()) - - data, err := s.Marshal() - require.NoError(t, err) - require.Error(t, object.NewSplitInfo().Unmarshal(data)) - }) -} - -func generateID() oid.ID { - var buf [32]byte - _, _ = rand.Read(buf[:]) - - var id oid.ID - id.SetSHA256(buf) - - return id -} - -func 
TestNewSplitInfoFromV2(t *testing.T) { - t.Run("from nil", func(t *testing.T) { - var x *objv2.SplitInfo - - require.Nil(t, object.NewSplitInfoFromV2(x)) - }) -} - -func TestSplitInfo_ToV2(t *testing.T) { - t.Run("nil", func(t *testing.T) { - var x *object.SplitInfo - - require.Nil(t, x.ToV2()) - }) -} - -func TestNewSplitInfo(t *testing.T) { - t.Run("default values", func(t *testing.T) { - si := object.NewSplitInfo() - - // check initial values - require.Nil(t, si.SplitID()) - _, set := si.LastPart() - require.False(t, set) - _, set = si.Link() - require.False(t, set) - _, set = si.FirstPart() - require.False(t, set) - - // convert to v2 message - siV2 := si.ToV2() - - require.Nil(t, siV2.GetSplitID()) - require.Nil(t, siV2.GetLastPart()) - require.Nil(t, siV2.GetLink()) - require.Nil(t, siV2.GetFirstPart()) - }) -} - -func TestSplitInfoMarshalJSON(t *testing.T) { - t.Run("good", func(t *testing.T) { - s := object.NewSplitInfo() - s.SetSplitID(object.NewSplitID()) - s.SetLastPart(generateID()) - s.SetLink(generateID()) - s.SetFirstPart(generateID()) - - data, err := s.MarshalJSON() - require.NoError(t, err) - - actual := object.NewSplitInfo() - require.NoError(t, json.Unmarshal(data, actual)) - require.Equal(t, s, actual) - }) - t.Run("bad link", func(t *testing.T) { - data := `{"splitId":"Sn707289RrqDyJOrZMbMoQ==","lastPart":{"value":"Y7baWE0UdUOBr1ELKX3Q5v1LKRubQUbI81Q5UxCVeow="},"link":{"value":"bad"}}` - require.Error(t, json.Unmarshal([]byte(data), object.NewSplitInfo())) - }) - t.Run("bad last part", func(t *testing.T) { - data := `{"splitId":"Sn707289RrqDyJOrZMbMoQ==","lastPart":{"value":"bad"},"link":{"value":"eRyPNCNNxHfxPcjijlv05HEcdoep/b7eHNLRSmDlnts="}}` - require.Error(t, json.Unmarshal([]byte(data), object.NewSplitInfo())) - }) - t.Run("bad first part", func(t *testing.T) { - data := `{"splitId":"Sn707289RrqDyJOrZMbMoQ==","firstPart":{"value":"bad"},"link":{"value":"eRyPNCNNxHfxPcjijlv05HEcdoep/b7eHNLRSmDlnts="}}` - require.Error(t, 
json.Unmarshal([]byte(data), object.NewSplitInfo())) - }) -} diff --git a/object/test/generate.go b/object/test/generate.go index bff92c60d..f8aba75da 100644 --- a/object/test/generate.go +++ b/object/test/generate.go @@ -1,132 +1,92 @@ package objecttest import ( - "testing" + "math/rand" + "strconv" - "github.com/google/uuid" - objecttest "github.com/nspcc-dev/neofs-api-go/v2/object/test" checksumtest "github.com/nspcc-dev/neofs-sdk-go/checksum/test" cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" + neofscryptotest "github.com/nspcc-dev/neofs-sdk-go/crypto/test" "github.com/nspcc-dev/neofs-sdk-go/object" - oid "github.com/nspcc-dev/neofs-sdk-go/object/id" oidtest "github.com/nspcc-dev/neofs-sdk-go/object/id/test" sessiontest "github.com/nspcc-dev/neofs-sdk-go/session/test" usertest "github.com/nspcc-dev/neofs-sdk-go/user/test" - "github.com/nspcc-dev/neofs-sdk-go/version" ) -// Range returns random object.Range. -func Range() object.Range { - x := object.NewRange() - - x.SetOffset(1024) - x.SetLength(2048) - - return *x -} - -// Attribute returns random object.Attribute. -func Attribute() object.Attribute { - x := object.NewAttribute("key", "value") - - return *x -} - -// SplitID returns random object.SplitID. 
-func SplitID() object.SplitID { - x := object.NewSplitID() - - x.SetUUID(uuid.New()) - - return *x -} - -func generate(t testing.TB, withParent bool) object.Object { - x := object.New() - ver := version.Current() - - x.SetID(oidtest.ID()) - tok := sessiontest.Object() - x.SetSessionToken(&tok) - x.SetPayload([]byte{1, 2, 3}) - owner := usertest.ID(t) - x.SetOwnerID(&owner) - x.SetContainerID(cidtest.ID()) - x.SetType(object.TypeTombstone) - x.SetVersion(&ver) - x.SetPayloadSize(111) - x.SetCreationEpoch(222) - x.SetPreviousID(oidtest.ID()) - x.SetParentID(oidtest.ID()) - x.SetChildren(oidtest.ID(), oidtest.ID()) - x.SetAttributes(Attribute(), Attribute()) - splitID := SplitID() - x.SetSplitID(&splitID) - x.SetPayloadChecksum(checksumtest.Checksum()) - x.SetPayloadHomomorphicHash(checksumtest.Checksum()) +func header(withParent bool) object.Header { + h := object.New(cidtest.ID(), usertest.ID()).Header + h.SetSessionToken(sessiontest.Object()) + h.SetType(object.Type(rand.Uint32())) + h.SetPayloadSize(rand.Uint64()) + h.SetCreationEpoch(rand.Uint64()) + h.SetPreviousSplitObject(oidtest.ID()) + h.SetFirstSplitObject(oidtest.ID()) + h.SetParentID(oidtest.ID()) + h.SetParentSignature(neofscryptotest.Signature()) + h.SetPayloadChecksum(checksumtest.Checksum()) + h.SetPayloadHomomorphicChecksum(checksumtest.Checksum()) + + nAttr := rand.Int() % 4 + for i := 0; i < nAttr; i++ { + si := strconv.Itoa(rand.Int()) + h.SetAttribute("attr_"+si, "val_"+si) + } if withParent { - par := generate(t, false) - x.SetParent(&par) + h.SetParentHeader(header(false)) } - return *x + return h } -// Raw returns random object.Object. -// Deprecated: (v1.0.0) use Object instead. -func Raw(t testing.TB) object.Object { - return Object(t) +// Header returns random object.Header. +func Header() object.Header { + return header(true) } // Object returns random object.Object. 
-func Object(t testing.TB) object.Object { - return generate(t, true) +func Object() object.Object { + payload := make([]byte, rand.Int()%32) + rand.Read(payload) + + obj := object.Object{Header: Header()} + obj.SetID(oidtest.ID()) + obj.SetSignature(neofscryptotest.Signature()) + obj.SetPayload(payload) + return obj } // Tombstone returns random object.Tombstone. func Tombstone() object.Tombstone { - x := object.NewTombstone() - - splitID := SplitID() - x.SetSplitID(&splitID) - x.SetExpirationEpoch(13) - x.SetMembers([]oid.ID{oidtest.ID(), oidtest.ID()}) - - return *x + var x object.Tombstone + x.SetMembers(oidtest.NIDs(rand.Int()%3 + 1)) + return x } // SplitInfo returns random object.SplitInfo. func SplitInfo() object.SplitInfo { - x := object.NewSplitInfo() - - splitID := SplitID() - x.SetSplitID(&splitID) - x.SetLink(oidtest.ID()) + var x object.SplitInfo + x.SetFirstPart(oidtest.ID()) x.SetLastPart(oidtest.ID()) - - return *x -} - -// SearchFilters returns random object.SearchFilters. -func SearchFilters() object.SearchFilters { - x := object.NewSearchFilters() - - x.AddObjectIDFilter(object.MatchStringEqual, oidtest.ID()) - x.AddObjectContainerIDFilter(object.MatchStringNotEqual, cidtest.ID()) - + x.SetLinker(oidtest.ID()) return x } // Lock returns random object.Lock. -func Lock() *object.Lock { +func Lock() object.Lock { var l object.Lock - l.WriteMembers([]oid.ID{oidtest.ID(), oidtest.ID()}) - - return &l + l.SetList(oidtest.NIDs(rand.Int()%3 + 1)) + return l } -// Link returns random object.Link. -func Link() *object.Link { - return (*object.Link)(objecttest.GenerateLink(false)) +// SplitChain returns random object.SplitChain. 
+func SplitChain() object.SplitChain { + els := make([]object.SplitChainElement, rand.Int()%3+1) + for i := range els { + els[i].SetID(oidtest.ID()) + els[i].SetPayloadSize(rand.Uint32()) + } + var x object.SplitChain + x.SetElements(els) + return x } diff --git a/object/test/generate_test.go b/object/test/generate_test.go new file mode 100644 index 000000000..47f0f15be --- /dev/null +++ b/object/test/generate_test.go @@ -0,0 +1,94 @@ +package objecttest_test + +import ( + "testing" + + apiobject "github.com/nspcc-dev/neofs-sdk-go/api/object" + "github.com/nspcc-dev/neofs-sdk-go/object" + objecttest "github.com/nspcc-dev/neofs-sdk-go/object/test" + "github.com/stretchr/testify/require" +) + +func TestHeader(t *testing.T) { + v := objecttest.Header() + require.NotEqual(t, v, objecttest.Header()) + + var v2 object.Header + require.NoError(t, v2.Unmarshal(v.Marshal())) + require.Equal(t, v, v2) + + var m apiobject.Header + v.WriteToV2(&m) + var v3 object.Header + require.NoError(t, v3.ReadFromV2(&m)) + require.Equal(t, v, v3) + + j, err := v.MarshalJSON() + require.NoError(t, err) + var v4 object.Header + require.NoError(t, v4.UnmarshalJSON(j)) + require.Equal(t, v, v4) +} + +func TestObject(t *testing.T) { + v := objecttest.Object() + require.NotEqual(t, v, objecttest.Object()) + + var v2 object.Object + require.NoError(t, v2.Unmarshal(v.Marshal())) + require.Equal(t, v, v2) + + var m apiobject.Object + v.WriteToV2(&m) + var v3 object.Object + require.NoError(t, v3.ReadFromV2(&m)) + require.Equal(t, v, v3) + + j, err := v.MarshalJSON() + require.NoError(t, err) + var v4 object.Object + require.NoError(t, v4.UnmarshalJSON(j)) + require.Equal(t, v, v4) +} + +func TestTombstone(t *testing.T) { + v := objecttest.Tombstone() + require.NotEqual(t, v, objecttest.Tombstone()) + + var v2 object.Tombstone + require.NoError(t, v2.Unmarshal(v.Marshal())) + require.Equal(t, v, v2) +} + +func TestSplitInfo(t *testing.T) { + v := objecttest.SplitInfo() + require.NotEqual(t, v, 
objecttest.SplitInfo()) + + var v2 object.SplitInfo + require.NoError(t, v2.Unmarshal(v.Marshal())) + require.Equal(t, v, v2) + + var m apiobject.SplitInfo + v.WriteToV2(&m) + var v3 object.SplitInfo + require.NoError(t, v3.ReadFromV2(&m)) + require.Equal(t, v, v3) +} + +func TestLock(t *testing.T) { + v := objecttest.Lock() + require.NotEqual(t, v, objecttest.Lock()) + + var v2 object.Lock + require.NoError(t, v2.Unmarshal(v.Marshal())) + require.Equal(t, v, v2) +} + +func TestSplitChain(t *testing.T) { + v := objecttest.SplitChain() + require.NotEqual(t, v, objecttest.SplitChain()) + + var v2 object.SplitChain + require.NoError(t, v2.Unmarshal(v.Marshal())) + require.Equal(t, v, v2) +} diff --git a/object/tombstone.go b/object/tombstone.go index 3d7886a43..c53f7211c 100644 --- a/object/tombstone.go +++ b/object/tombstone.go @@ -1,148 +1,118 @@ package object import ( - "github.com/nspcc-dev/neofs-api-go/v2/refs" - "github.com/nspcc-dev/neofs-api-go/v2/tombstone" + "errors" + "fmt" + + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/tombstone" oid "github.com/nspcc-dev/neofs-sdk-go/object/id" + "google.golang.org/protobuf/proto" ) -// Tombstone represents v2-compatible tombstone structure. -type Tombstone tombstone.Tombstone - -// NewTombstoneFromV2 wraps v2 [tombstone.Tombstone] message to [Tombstone]. -// -// Nil [tombstone.Tombstone] converts to nil. -func NewTombstoneFromV2(tV2 *tombstone.Tombstone) *Tombstone { - return (*Tombstone)(tV2) -} - -// NewTombstone creates and initializes blank [Tombstone]. -// -// Defaults: -// - exp: 0; -// - splitID: nil; -// - members: nil. -func NewTombstone() *Tombstone { - return NewTombstoneFromV2(new(tombstone.Tombstone)) +// Tombstone contains information about removed objects. Tombstone is stored and +// transmitted as payload of system NeoFS objects. 
+type Tombstone struct { + members []oid.ID + exp uint64 // deprecated + splitID []byte // deprecated } -// ToV2 converts [Tombstone] to v2 [tombstone.Tombstone] message. +// readFromV2 reads Tombstone from the [tombstone.Tombstone] message. Returns an +// error if the message is malformed according to the NeoFS API V2 protocol. The +// message must not be nil. // -// Nil [Tombstone] converts to nil. +// ReadFromV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. // -// The value returned shares memory with the structure itself, so changing it can lead to data corruption. -// Make a copy if you need to change it. -func (t *Tombstone) ToV2() *tombstone.Tombstone { - return (*tombstone.Tombstone)(t) -} +// See also writeToV2. +func (t *Tombstone) readFromV2(m *tombstone.Tombstone) error { + if len(m.Members) == 0 { + return errors.New("missing members") + } -// ExpirationEpoch returns the last NeoFS epoch number of the tombstone lifetime. -// -// See also [Tombstone.SetExpirationEpoch]. -func (t *Tombstone) ExpirationEpoch() uint64 { - return (*tombstone.Tombstone)(t).GetExpirationEpoch() -} + if ln := len(m.SplitId); ln > 0 && ln != 16 { + return fmt.Errorf("invalid split ID length %d", ln) + } -// SetExpirationEpoch sets the last NeoFS epoch number of the tombstone lifetime. -// -// See also [Tombstone.ExpirationEpoch]. -func (t *Tombstone) SetExpirationEpoch(v uint64) { - (*tombstone.Tombstone)(t).SetExpirationEpoch(v) + t.members = make([]oid.ID, len(m.Members)) + for i := range m.Members { + if m.Members[i] == nil { + return fmt.Errorf("member #%d is nil", i) + } + err := t.members[i].ReadFromV2(m.Members[i]) + if err != nil { + return fmt.Errorf("invalid member #%d: %w", i, err) + } + } + + t.exp = m.ExpirationEpoch + t.splitID = m.SplitId + + return nil } -// SplitID returns identifier of object split hierarchy. 
+// writeToV2 writes Tombstone to the [tombstone.Tombstone] message of the NeoFS +// API protocol. // -// The value returned shares memory with the structure itself, so changing it can lead to data corruption. -// Make a copy if you need to change it. +// WriteToV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. // -// See also [Tombstone.SetSplitID]. -func (t *Tombstone) SplitID() *SplitID { - return NewSplitIDFromV2( - (*tombstone.Tombstone)(t).GetSplitID()) -} +// See also readFromV2. +func (t Tombstone) writeToV2(m *tombstone.Tombstone) { + if t.members != nil { + m.Members = make([]*refs.ObjectID, len(t.members)) + for i := range t.members { + m.Members[i] = new(refs.ObjectID) + t.members[i].WriteToV2(m.Members[i]) + } + } else { + m.Members = nil + } -// SetSplitID sets identifier of object split hierarchy. -// -// See also [Tombstone.SplitID]. -func (t *Tombstone) SetSplitID(v *SplitID) { - (*tombstone.Tombstone)(t).SetSplitID(v.ToV2()) + m.ExpirationEpoch = t.exp + m.SplitId = t.splitID } // Members returns list of objects to be deleted. // // See also [Tombstone.SetMembers]. -func (t *Tombstone) Members() []oid.ID { - v2 := (*tombstone.Tombstone)(t) - msV2 := v2.GetMembers() - - if msV2 == nil { - return nil - } - - var ( - ms = make([]oid.ID, len(msV2)) - id oid.ID - ) - - for i := range msV2 { - _ = id.ReadFromV2(msV2[i]) - ms[i] = id - } - - return ms +func (t Tombstone) Members() []oid.ID { + return t.members } // SetMembers sets list of objects to be deleted. // // See also [Tombstone.Members]. func (t *Tombstone) SetMembers(v []oid.ID) { - var ms []refs.ObjectID - - if v != nil { - ms = (*tombstone.Tombstone)(t). 
- GetMembers() - - if ln := len(v); cap(ms) >= ln { - ms = ms[:0] - } else { - ms = make([]refs.ObjectID, 0, ln) - } - - var idV2 refs.ObjectID - - for i := range v { - v[i].WriteToV2(&idV2) - ms = append(ms, idV2) - } - } - - (*tombstone.Tombstone)(t).SetMembers(ms) + t.members = v } -// Marshal marshals [Tombstone] into a protobuf binary form. +// Marshal encodes Tombstone into a Protocol Buffers V3 binary format. // // See also [Tombstone.Unmarshal]. -func (t *Tombstone) Marshal() ([]byte, error) { - return (*tombstone.Tombstone)(t).StableMarshal(nil), nil +func (t Tombstone) Marshal() []byte { + var m tombstone.Tombstone + t.writeToV2(&m) + + b, err := proto.Marshal(&m) + if err != nil { + // while it is bad to panic on external package return, we can do nothing better + // for this case: how can a normal message not be encoded? + panic(fmt.Errorf("unexpected marshal protobuf message failure: %w", err)) + } + return b } -// Unmarshal unmarshals protobuf binary representation of [Tombstone]. +// Unmarshal decodes Protocol Buffers V3 binary data into the Tombstone. Returns +// an error if the message is malformed according to the NeoFS API V2 protocol. // // See also [Tombstone.Marshal]. func (t *Tombstone) Unmarshal(data []byte) error { - return (*tombstone.Tombstone)(t).Unmarshal(data) -} - -// MarshalJSON encodes [Tombstone] to protobuf JSON format. -// -// See also [Tombstone.UnmarshalJSON]. -func (t *Tombstone) MarshalJSON() ([]byte, error) { - return (*tombstone.Tombstone)(t).MarshalJSON() -} - -// UnmarshalJSON decodes [Tombstone] from protobuf JSON format. -// -// See also [Tombstone.MarshalJSON]. 
-func (t *Tombstone) UnmarshalJSON(data []byte) error { - return (*tombstone.Tombstone)(t).UnmarshalJSON(data) + var m tombstone.Tombstone + err := proto.Unmarshal(data, &m) + if err != nil { + return fmt.Errorf("decode protobuf: %w", err) + } + return t.readFromV2(&m) } diff --git a/object/tombstone_test.go b/object/tombstone_test.go index 2b9d5beec..4886d3219 100644 --- a/object/tombstone_test.go +++ b/object/tombstone_test.go @@ -1,96 +1,3 @@ -package object +package object_test -import ( - "crypto/sha256" - "math/rand" - "testing" - - "github.com/nspcc-dev/neofs-api-go/v2/tombstone" - oid "github.com/nspcc-dev/neofs-sdk-go/object/id" - "github.com/stretchr/testify/require" -) - -func generateIDList(sz int) []oid.ID { - res := make([]oid.ID, sz) - cs := [sha256.Size]byte{} - - for i := 0; i < sz; i++ { - var oID oid.ID - - res[i] = oID - //nolint:staticcheck - rand.Read(cs[:]) - res[i].SetSHA256(cs) - } - - return res -} - -func TestTombstone(t *testing.T) { - ts := NewTombstone() - - exp := uint64(13) - ts.SetExpirationEpoch(exp) - require.Equal(t, exp, ts.ExpirationEpoch()) - - splitID := NewSplitID() - ts.SetSplitID(splitID) - require.Equal(t, splitID, ts.SplitID()) - - members := generateIDList(3) - ts.SetMembers(members) - require.Equal(t, members, ts.Members()) -} - -func TestTombstoneEncoding(t *testing.T) { - ts := NewTombstone() - ts.SetExpirationEpoch(13) - ts.SetSplitID(NewSplitID()) - ts.SetMembers(generateIDList(5)) - - t.Run("binary", func(t *testing.T) { - data, err := ts.Marshal() - require.NoError(t, err) - - ts2 := NewTombstone() - require.NoError(t, ts2.Unmarshal(data)) - - require.Equal(t, ts, ts2) - }) - - t.Run("json", func(t *testing.T) { - data, err := ts.MarshalJSON() - require.NoError(t, err) - - ts2 := NewTombstone() - require.NoError(t, ts2.UnmarshalJSON(data)) - - require.Equal(t, ts, ts2) - }) -} - -func TestNewTombstoneFromV2(t *testing.T) { - t.Run("from nil", func(t *testing.T) { - var x *tombstone.Tombstone - - require.Nil(t, 
NewTombstoneFromV2(x)) - }) -} - -func TestNewTombstone(t *testing.T) { - t.Run("default values", func(t *testing.T) { - ts := NewTombstone() - - // check initial values - require.Nil(t, ts.SplitID()) - require.Nil(t, ts.Members()) - require.Zero(t, ts.ExpirationEpoch()) - - // convert to v2 message - tsV2 := ts.ToV2() - - require.Nil(t, tsV2.GetSplitID()) - require.Nil(t, tsV2.GetMembers()) - require.Zero(t, tsV2.GetExpirationEpoch()) - }) -} +// TODO diff --git a/object/type.go b/object/type.go index dfcf197ed..cb85559dd 100644 --- a/object/type.go +++ b/object/type.go @@ -1,63 +1,86 @@ package object import ( - "github.com/nspcc-dev/neofs-api-go/v2/object" + "fmt" + "strconv" ) -// Type is an enumerator for possible object types. -type Type object.Type +// Type defines the payload format of the object. +type Type uint16 +// Supported Type values. const ( - TypeRegular Type = iota - TypeTombstone - TypeStorageGroup - TypeLock - TypeLink + TypeRegular Type = iota // uninterpretable plain data + TypeTombstone // [Tombstone] carrier + TypeStorageGroup // storage group carrier + TypeLock // [Lock] carrier + TypeLink // [SplitChain] carrier ) -// ToV2 converts [Type] to v2 [object.Type]. -func (t Type) ToV2() object.Type { - return object.Type(t) -} - -// TypeFromV2 converts v2 [object.Type] to [Type]. -func TypeFromV2(t object.Type) Type { - return Type(t) -} - -// EncodeToString returns string representation of [Type]. +// EncodeToString encodes Type into NeoFS API V2 protocol string. // -// String mapping: -// - [TypeTombstone]: TOMBSTONE; -// - [TypeStorageGroup]: STORAGE_GROUP; -// - [TypeLock]: LOCK; -// - [TypeRegular], default: REGULAR. -// - [TypeLink], default: LINK. +// See also [Type.DecodeString]. 
func (t Type) EncodeToString() string { - return t.ToV2().String() + switch t { + default: + return strconv.FormatUint(uint64(t), 10) + case TypeRegular: + return "REGULAR" + case TypeTombstone: + return "TOMBSTONE" + case TypeStorageGroup: + return "STORAGE_GROUP" + case TypeLock: + return "LOCK" + case TypeLink: + return "LINK" + } } // String implements [fmt.Stringer]. // // String is designed to be human-readable, and its format MAY differ between -// SDK versions. String MAY return same result as [Type.EncodeToString]. String MUST NOT -// be used to encode ID into NeoFS protocol string. +// SDK versions. String MAY return same result as [Type.EncodeToString]. String +// MUST NOT be used to encode Type into NeoFS protocol string. func (t Type) String() string { - return t.EncodeToString() + switch t { + default: + return fmt.Sprintf("UNKNOWN#%d", t) + case TypeRegular: + return "REGULAR" + case TypeTombstone: + return "TOMBSTONE" + case TypeStorageGroup: + return "STORAGE_GROUP" + case TypeLock: + return "LOCK" + case TypeLink: + return "LINK" + } } -// DecodeString parses [Type] from a string representation. -// It is a reverse action to EncodeToString(). +// DecodeString decodes string into Type according to NeoFS API protocol. +// Returns an error if s is malformed. // -// Returns true if s was parsed successfully. -func (t *Type) DecodeString(s string) bool { - var g object.Type - - ok := g.FromString(s) - - if ok { - *t = TypeFromV2(g) +// See also [Type.EncodeToString]. 
+func (t *Type) DecodeString(s string) error { + switch s { + default: + n, err := strconv.ParseUint(s, 10, 32) + if err != nil { + return fmt.Errorf("decode numeric value: %w", err) + } + *t = Type(n) + case "REGULAR": + *t = TypeRegular + case "TOMBSTONE": + *t = TypeTombstone + case "STORAGE_GROUP": + *t = TypeStorageGroup + case "LOCK": + *t = TypeLock + case "LINK": + *t = TypeLink } - - return ok + return nil } diff --git a/object/type_test.go b/object/type_test.go index 621719c6c..4886d3219 100644 --- a/object/type_test.go +++ b/object/type_test.go @@ -1,89 +1,3 @@ package object_test -import ( - "testing" - - v2object "github.com/nspcc-dev/neofs-api-go/v2/object" - "github.com/nspcc-dev/neofs-sdk-go/object" - "github.com/stretchr/testify/require" -) - -func TestType_ToV2(t *testing.T) { - typs := []struct { - t object.Type - t2 v2object.Type - }{ - { - t: object.TypeRegular, - t2: v2object.TypeRegular, - }, - { - t: object.TypeTombstone, - t2: v2object.TypeTombstone, - }, - { - t: object.TypeStorageGroup, - t2: v2object.TypeStorageGroup, - }, - { - t: object.TypeLock, - t2: v2object.TypeLock, - }, - { - t: object.TypeLink, - t2: v2object.TypeLink, - }, - } - - for _, item := range typs { - t2 := item.t.ToV2() - - require.Equal(t, item.t2, t2) - - require.Equal(t, item.t, object.TypeFromV2(item.t2)) - } -} - -func TestType_String(t *testing.T) { - toPtr := func(v object.Type) *object.Type { - return &v - } - - testEnumStrings(t, new(object.Type), []enumStringItem{ - {val: toPtr(object.TypeTombstone), str: "TOMBSTONE"}, - {val: toPtr(object.TypeStorageGroup), str: "STORAGE_GROUP"}, - {val: toPtr(object.TypeRegular), str: "REGULAR"}, - {val: toPtr(object.TypeLock), str: "LOCK"}, - {val: toPtr(object.TypeLink), str: "LINK"}, - }) -} - -type enumIface interface { - DecodeString(string) bool - EncodeToString() string -} - -type enumStringItem struct { - val enumIface - str string -} - -func testEnumStrings(t *testing.T, e enumIface, items []enumStringItem) { - 
for _, item := range items { - require.Equal(t, item.str, item.val.EncodeToString()) - - s := item.val.EncodeToString() - - require.True(t, e.DecodeString(s), s) - - require.EqualValues(t, item.val, e, item.val) - } - - // incorrect strings - for _, str := range []string{ - "some string", - "undefined", - } { - require.False(t, e.DecodeString(str)) - } -} +// TODO diff --git a/object/wellknown_attributes.go b/object/wellknown_attributes.go deleted file mode 100644 index efece3885..000000000 --- a/object/wellknown_attributes.go +++ /dev/null @@ -1,28 +0,0 @@ -package object - -const ( - // AttributeName is an attribute key that is commonly used to denote - // human-friendly name. - AttributeName = "Name" - - // AttributeFileName is an attribute key that is commonly used to denote - // file name to be associated with the object on saving. - AttributeFileName = "FileName" - - // AttributeFilePath is an attribute key that is commonly used to denote - // full path to be associated with the object on saving. Should start with a - // '/' and use '/' as a delimiting symbol. Trailing '/' should be - // interpreted as a virtual directory marker. If an object has conflicting - // FilePath and FileName, FilePath should have higher priority, because it - // is used to construct the directory tree. FilePath with trailing '/' and - // non-empty FileName attribute should not be used together. - AttributeFilePath = "FilePath" - - // AttributeTimestamp is an attribute key that is commonly used to denote - // user-defined local time of object creation in Unix Timestamp format. - AttributeTimestamp = "Timestamp" - - // AttributeContentType is an attribute key that is commonly used to denote - // MIME Content Type of object's payload. 
- AttributeContentType = "Content-Type" -) diff --git a/reputation/example_test.go b/reputation/example_test.go deleted file mode 100644 index 675969b71..000000000 --- a/reputation/example_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package reputation_test - -import ( - apiGoReputation "github.com/nspcc-dev/neofs-api-go/v2/reputation" - "github.com/nspcc-dev/neofs-sdk-go/reputation" -) - -// Instances can be also used to process NeoFS API V2 protocol messages with [https://github.com/nspcc-dev/neofs-api] package. -func ExampleGlobalTrust_marshalling() { - // import apiGoReputation "github.com/nspcc-dev/neofs-api-go/v2/reputation" - - // On the client side. - var trust reputation.GlobalTrust - var msg apiGoReputation.GlobalTrust - trust.WriteToV2(&msg) - // *send message* - - // On the server side. - _ = trust.ReadFromV2(msg) -} diff --git a/reputation/peer.go b/reputation/peer.go index b23975274..a6094d2c8 100644 --- a/reputation/peer.go +++ b/reputation/peer.go @@ -1,111 +1,93 @@ package reputation import ( - "bytes" "errors" "fmt" "github.com/mr-tron/base58" - "github.com/nspcc-dev/neofs-api-go/v2/reputation" + "github.com/nspcc-dev/neofs-sdk-go/api/reputation" ) +// PeerIDSize is an ID size of the peer participating in the NeoFS reputation +// system. +const PeerIDSize = 33 + // PeerID represents unique identifier of the peer participating in the NeoFS -// reputation system. +// reputation system. PeerID corresponds to the binary-encoded public key in +// a format similar to the NeoFS network map. +// +// PeerID implements built-in comparable interface. // -// ID is mutually compatible with github.com/nspcc-dev/neofs-api-go/v2/reputation.PeerID -// message. See ReadFromV2 / WriteToV2 methods. +// ID is mutually compatible with [reputation.PeerID] message. See +// [PeerID.ReadFromV2] / [PeerID.WriteToV2] methods. // // Instances can be created using built-in var declaration. 
-type PeerID struct { - m reputation.PeerID -} +type PeerID [PeerIDSize]byte -// ReadFromV2 reads PeerID from the reputation.PeerID message. Returns an -// error if the message is malformed according to the NeoFS API V2 protocol. -// -// See also WriteToV2. -func (x *PeerID) ReadFromV2(m reputation.PeerID) error { - val := m.GetPublicKey() - if len(val) == 0 { - return errors.New("missing ID bytes") +func (x *PeerID) decodeBinary(b []byte) error { + if len(b) != PeerIDSize { + return fmt.Errorf("invalid value length %d", len(b)) } - - x.m = m - + copy(x[:], b) return nil } -// WriteToV2 writes PeerID to the reputation.PeerID message. -// The message must not be nil. -// -// See also ReadFromV2. -func (x PeerID) WriteToV2(m *reputation.PeerID) { - *m = x.m -} - -// SetPublicKey sets [PeerID] as a binary-encoded public key which authenticates -// the participant of the NeoFS reputation system. +// ReadFromV2 reads PeerID from the reputation.PeerID message. Returns an error +// if the message is malformed according to the NeoFS API V2 protocol. The +// message must not be nil. // -// Argument MUST NOT be mutated, make a copy first. +// ReadFromV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. // -// Parameter key is a serialized compressed public key. See [elliptic.MarshalCompressed]. -// -// See also [ComparePeerKey]. -func (x *PeerID) SetPublicKey(key []byte) { - x.m.SetPublicKey(key) +// See also [PeerID.WriteToV2]. +func (x *PeerID) ReadFromV2(m *reputation.PeerID) error { + if len(m.PublicKey) == 0 { + return errors.New("missing value field") + } + return x.decodeBinary(m.PublicKey) } -// PublicKey return public key set using [PeerID.SetPublicKey]. -// -// Zero [PeerID] has zero key which is incorrect according to NeoFS API +// WriteToV2 writes PeerID to the reputation.PeerID message of the NeoFS API // protocol. 
// -// The resulting slice of bytes is a serialized compressed public key. See [elliptic.MarshalCompressed]. -// Use [neofsecdsa.PublicKey.Decode] to decode it into a type-specific structure. -// -// The value returned shares memory with the structure itself, so changing it can lead to data corruption. -// Make a copy if you need to change it. -func (x PeerID) PublicKey() []byte { - return x.m.GetPublicKey() -} - -// ComparePeerKey checks if the given PeerID corresponds to the party -// authenticated by the given binary public key. +// WriteToV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. // -// The key parameter is a slice of bytes is a serialized compressed public key. See [elliptic.MarshalCompressed]. -func ComparePeerKey(peer PeerID, key []byte) bool { - return bytes.Equal(peer.PublicKey(), key) +// See also [PeerID.ReadFromV2]. +func (x PeerID) WriteToV2(m *reputation.PeerID) { + m.PublicKey = x[:] } -// EncodeToString encodes ID into NeoFS API protocol string. +// EncodeToString encodes PeerID into NeoFS API V2 protocol string. // -// Zero PeerID is base58 encoding of PeerIDSize zeros. +// Zero ID is base58 encoding of [PeerIDSize] zeros. // -// See also DecodeString. +// See also [PeerID.DecodeString]. func (x PeerID) EncodeToString() string { - return base58.Encode(x.m.GetPublicKey()) + return base58.Encode(x[:]) } // DecodeString decodes string into PeerID according to NeoFS API protocol. // Returns an error if s is malformed. // -// See also DecodeString. +// See also [PeerID.EncodeToString]. 
func (x *PeerID) DecodeString(s string) error { - data, err := base58.Decode(s) - if err != nil { - return fmt.Errorf("decode base58: %w", err) + var b []byte + if s != "" { + var err error + b, err = base58.Decode(s) + if err != nil { + return fmt.Errorf("decode base58: %w", err) + } } - - x.m.SetPublicKey(data) - - return nil + return x.decodeBinary(b) } -// String implements fmt.Stringer. +// String implements [fmt.Stringer]. // // String is designed to be human-readable, and its format MAY differ between -// SDK versions. String MAY return same result as EncodeToString. String MUST NOT -// be used to encode ID into NeoFS protocol string. +// SDK versions. String MAY return same result as EncodeToString. String MUST +// NOT be used to encode PeerID into NeoFS protocol string. func (x PeerID) String() string { return x.EncodeToString() } diff --git a/reputation/peer_test.go b/reputation/peer_test.go index bfee237a0..270aea5ef 100644 --- a/reputation/peer_test.go +++ b/reputation/peer_test.go @@ -3,38 +3,104 @@ package reputation_test import ( "testing" - v2reputation "github.com/nspcc-dev/neofs-api-go/v2/reputation" + apireputation "github.com/nspcc-dev/neofs-sdk-go/api/reputation" "github.com/nspcc-dev/neofs-sdk-go/reputation" reputationtest "github.com/nspcc-dev/neofs-sdk-go/reputation/test" "github.com/stretchr/testify/require" ) -func TestPeerID_PublicKey(t *testing.T) { - var val reputation.PeerID +func TestPeerIDComparable(t *testing.T) { + id1 := reputationtest.PeerID() + require.True(t, id1 == id1) + id2 := reputationtest.ChangePeerID(id1) + require.NotEqual(t, id1, id2) + require.False(t, id1 == id2) +} - require.Zero(t, val.PublicKey()) +func TestPeerID_String(t *testing.T) { + id1 := reputationtest.PeerID() + id2 := reputationtest.ChangePeerID(id1) + require.NotEmpty(t, id1.String()) + require.Equal(t, id1.String(), id1.String()) + require.NotEqual(t, id1.String(), id2.String()) +} - key := []byte{3, 2, 1} +func TestPeerID_ReadFromV2(t *testing.T) { + 
t.Run("missing fields", func(t *testing.T) { + t.Run("value", func(t *testing.T) { + id := reputationtest.PeerID() + var m apireputation.PeerID - val.SetPublicKey(key) + id.WriteToV2(&m) + m.PublicKey = nil + require.ErrorContains(t, id.ReadFromV2(&m), "missing value field") + m.PublicKey = []byte{} + require.ErrorContains(t, id.ReadFromV2(&m), "missing value field") + }) + }) + t.Run("invalid fields", func(t *testing.T) { + t.Run("value", func(t *testing.T) { + id := reputationtest.PeerID() + var m apireputation.PeerID - var m v2reputation.PeerID - val.WriteToV2(&m) + id.WriteToV2(&m) + m.PublicKey = make([]byte, 32) + require.ErrorContains(t, id.ReadFromV2(&m), "invalid value length 32") + m.PublicKey = make([]byte, 34) + require.ErrorContains(t, id.ReadFromV2(&m), "invalid value length 34") + }) + }) +} - require.Equal(t, key, m.GetPublicKey()) +func TestPeerID_DecodeString(t *testing.T) { + var id reputation.PeerID - var val2 reputation.PeerID - require.NoError(t, val2.ReadFromV2(m)) + const zeroIDString = "111111111111111111111111111111111" + require.Equal(t, zeroIDString, id.EncodeToString()) - require.Equal(t, key, val.PublicKey()) + var bin = [33]byte{106, 6, 81, 91, 166, 102, 170, 186, 188, 108, 51, 93, 37, 154, 31, 156, 67, 97, 148, 186, 222, 175, 255, 251, 153, 158, 211, 222, 251, 168, 26, 141, 16} + const str = "YVmEgnwZTZsnXFnRKsrk88LAfj4YKm1B83LzR6GCcnCvj" + require.NoError(t, id.DecodeString(str)) + require.Equal(t, str, id.EncodeToString()) + require.EqualValues(t, bin, id) - require.True(t, reputation.ComparePeerKey(val, key)) -} + var binOther = [33]byte{14, 5, 25, 39, 25, 170, 76, 164, 133, 133, 150, 101, 89, 226, 39, 70, 35, 200, 81, 200, 121, 104, 205, 74, 36, 179, 14, 151, 244, 135, 93, 244, 229} + const strOther = "5AZLUQPv8CuUHSqqcFqWf3UdWLP46zxc5Z4riS3gxmkPn" + require.NoError(t, id.DecodeString(strOther)) + require.Equal(t, strOther, id.EncodeToString()) + require.EqualValues(t, binOther, id) + + t.Run("invalid", func(t *testing.T) { + var 
id reputation.PeerID + for _, testCase := range []struct{ input, err string }{ + {input: "not_a_base58_string", err: "decode base58"}, + {input: "", err: "invalid value length 0"}, + {input: "zJd3YyeBk9o6q281fE81LxiYucTbdToLK3RzdMR8quc", err: "invalid value length 32"}, + } { + require.ErrorContains(t, id.DecodeString(testCase.input), testCase.err, testCase) + } + }) + t.Run("encoding", func(t *testing.T) { + t.Run("api", func(t *testing.T) { + var src, dst reputation.PeerID + var msg apireputation.PeerID + + require.NoError(t, dst.DecodeString(str)) + + dst[0]++ + src.WriteToV2(&msg) + require.Equal(t, make([]byte, 33), msg.PublicKey) + require.NoError(t, dst.ReadFromV2(&msg)) + require.Zero(t, dst) -func TestPeerID_EncodeToString(t *testing.T) { - val := reputationtest.PeerID() - var val2 reputation.PeerID + require.NoError(t, src.DecodeString(str)) - require.NoError(t, val2.DecodeString(val.EncodeToString())) - require.Equal(t, val, val2) + src.WriteToV2(&msg) + require.Equal(t, bin[:], msg.PublicKey) + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + require.EqualValues(t, bin, dst) + require.Equal(t, str, dst.EncodeToString()) + }) + }) } diff --git a/reputation/test/generate.go b/reputation/test/generate.go index 83d1f761c..aa53f81ab 100644 --- a/reputation/test/generate.go +++ b/reputation/test/generate.go @@ -1,52 +1,65 @@ package reputationtest import ( - "testing" + "fmt" + "math/rand" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neofs-sdk-go/crypto/test" + neofscryptotest "github.com/nspcc-dev/neofs-sdk-go/crypto/test" "github.com/nspcc-dev/neofs-sdk-go/reputation" ) -func PeerID() (v reputation.PeerID) { - p, err := keys.NewPrivateKey() - if err != nil { - panic(err) - } - - v.SetPublicKey(p.PublicKey().Bytes()) +// PeerID returns random reputation.PeerID. 
+func PeerID() reputation.PeerID { + var res reputation.PeerID + rand.Read(res[:]) + return res +} - return +// ChangePeerID returns reputation.PeerID other than the given one. +func ChangePeerID(id reputation.PeerID) reputation.PeerID { + id[0]++ + return id } -func Trust() (v reputation.Trust) { - v.SetPeer(PeerID()) - v.SetValue(0.5) +// Trust returns random reputation.Trust. +func Trust() reputation.Trust { + var res reputation.Trust + res.SetPeer(PeerID()) + res.SetValue(rand.Float64()) + return res +} - return +// NTrusts returns n random reputation.Trust instances. +func NTrusts(n int) []reputation.Trust { + res := make([]reputation.Trust, n) + for i := range res { + res[i] = Trust() + } + return res } -func PeerToPeerTrust() (v reputation.PeerToPeerTrust) { +// PeerToPeerTrust returns random reputation.PeerToPeerTrust. +func PeerToPeerTrust() reputation.PeerToPeerTrust { + var v reputation.PeerToPeerTrust v.SetTrustingPeer(PeerID()) v.SetTrust(Trust()) - - return + return v } -func GlobalTrust() (v reputation.GlobalTrust) { - v.Init() - v.SetManager(PeerID()) - v.SetTrust(Trust()) - - return +func globalTrustUnsigned() reputation.GlobalTrust { + return reputation.NewGlobalTrust(PeerID(), Trust()) } -func SignedGlobalTrust(t *testing.T) reputation.GlobalTrust { - gt := GlobalTrust() - - if err := gt.Sign(test.RandomSignerRFC6979(t)); err != nil { - t.Fatalf("unexpected error from GlobalTrust.Sign: %v", err) +// GlobalTrust returns random reputation.GlobalTrust. +func GlobalTrust() reputation.GlobalTrust { + tr := globalTrustUnsigned() + if err := tr.Sign(neofscryptotest.RandomSigner()); err != nil { + panic(fmt.Errorf("unexpected sign error: %w", err)) } + return tr +} - return gt +// GlobalTrustUnsigned returns random unsigned reputation.GlobalTrust. 
+func GlobalTrustUnsigned() reputation.GlobalTrust { + return globalTrustUnsigned() } diff --git a/reputation/test/generate_test.go b/reputation/test/generate_test.go new file mode 100644 index 000000000..72f70c5e2 --- /dev/null +++ b/reputation/test/generate_test.go @@ -0,0 +1,63 @@ +package reputationtest_test + +import ( + "testing" + + apireputation "github.com/nspcc-dev/neofs-sdk-go/api/reputation" + "github.com/nspcc-dev/neofs-sdk-go/reputation" + reputationtest "github.com/nspcc-dev/neofs-sdk-go/reputation/test" + "github.com/stretchr/testify/require" +) + +func TestPeerID(t *testing.T) { + id := reputationtest.PeerID() + require.NotEqual(t, id, reputationtest.PeerID()) + + var m apireputation.PeerID + id.WriteToV2(&m) + var id2 reputation.PeerID + require.NoError(t, id2.ReadFromV2(&m)) + require.Equal(t, id, id2) +} + +func TestTrust(t *testing.T) { + tr := reputationtest.Trust() + require.NotEqual(t, tr, reputationtest.Trust()) + + var m apireputation.Trust + tr.WriteToV2(&m) + var tr2 reputation.Trust + require.NoError(t, tr2.ReadFromV2(&m)) + require.Equal(t, tr, tr2) +} + +func TestPeerToPeerTrust(t *testing.T) { + tr := reputationtest.PeerToPeerTrust() + require.NotEqual(t, tr, reputationtest.PeerToPeerTrust()) + + var m apireputation.PeerToPeerTrust + tr.WriteToV2(&m) + var tr2 reputation.PeerToPeerTrust + require.NoError(t, tr2.ReadFromV2(&m)) + require.Equal(t, tr, tr2) +} + +func TestGlobalTrust(t *testing.T) { + v := reputationtest.GlobalTrust() + require.NotEqual(t, v, reputationtest.GlobalTrust()) + require.True(t, v.VerifySignature()) + + var v2 reputation.GlobalTrust + require.NoError(t, v2.Unmarshal(v.Marshal())) + require.Equal(t, v, v2) +} + +func TestGlobalTrustUnsigned(t *testing.T) { + v := reputationtest.GlobalTrustUnsigned() + require.NotEqual(t, v, reputationtest.GlobalTrustUnsigned()) + require.False(t, v.VerifySignature()) + + var v2 reputation.GlobalTrust + require.NoError(t, v2.Unmarshal(v.Marshal())) + require.Equal(t, v, v2) +} 
diff --git a/reputation/trust.go b/reputation/trust.go index b95763287..adb43eea6 100644 --- a/reputation/trust.go +++ b/reputation/trust.go @@ -4,357 +4,379 @@ import ( "errors" "fmt" - "github.com/nspcc-dev/neofs-api-go/v2/refs" - "github.com/nspcc-dev/neofs-api-go/v2/reputation" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/reputation" neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" "github.com/nspcc-dev/neofs-sdk-go/version" + "google.golang.org/protobuf/proto" ) // Trust represents quantitative assessment of the trust of a participant in the // NeoFS reputation system. // -// Trust is mutually compatible with github.com/nspcc-dev/neofs-api-go/v2/reputation.Trust -// message. See ReadFromV2 / WriteToV2 methods. +// Trust is mutually compatible with [reputation.Trust] message. See +// [Trust.ReadFromV2] / [Trust.WriteToV2] methods. // // Instances can be created using built-in var declaration. type Trust struct { - m reputation.Trust + peerSet bool + peer PeerID + + val float64 } -// ReadFromV2 reads Trust from the reputation.Trust message. Returns an -// error if the message is malformed according to the NeoFS API V2 protocol. +// ReadFromV2 reads Trust from the reputation.Trust message. Returns an error if +// the message is malformed according to the NeoFS API V2 protocol. The message +// must not be nil. +// +// ReadFromV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. // -// See also WriteToV2. -func (x *Trust) ReadFromV2(m reputation.Trust) error { - if val := m.GetValue(); val < 0 || val > 1 { - return fmt.Errorf("invalid trust value %v", val) +// See also [Trust.WriteToV2]. 
+func (x *Trust) ReadFromV2(m *reputation.Trust) error { + if m.Value < 0 || m.Value > 1 { + return fmt.Errorf("invalid trust value %v", m.Value) } - peerV2 := m.GetPeer() - if peerV2 == nil { - return errors.New("missing peer field") + x.peerSet = m.Peer != nil + if !x.peerSet { + return errors.New("missing peer") } - var peer PeerID - - err := peer.ReadFromV2(*peerV2) + err := x.peer.ReadFromV2(m.Peer) if err != nil { - return fmt.Errorf("invalid peer field: %w", err) + return fmt.Errorf("invalid peer: %w", err) } - x.m = m + x.val = m.Value return nil } -// WriteToV2 writes Trust to the reputation.Trust message. -// The message must not be nil. +// WriteToV2 writes Trust to the reputation.Trust message of the NeoFS API +// protocol. // -// See also ReadFromV2. +// WriteToV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. +// +// See also [Trust.ReadFromV2]. func (x Trust) WriteToV2(m *reputation.Trust) { - *m = x.m + if x.peerSet { + m.Peer = new(reputation.PeerID) + x.peer.WriteToV2(m.Peer) + } else { + m.Peer = nil + } + m.Value = x.val } -// SetPeer specifies identifier of the participant of the NeoFS reputation system -// to which the Trust relates. +// SetPeer specifies identifier of the participant of the NeoFS reputation +// system to which the Trust relates. // -// See also Peer. +// See also [Trust.Peer]. func (x *Trust) SetPeer(id PeerID) { - var m reputation.PeerID - id.WriteToV2(&m) - - x.m.SetPeer(&m) + x.peer, x.peerSet = id, true } -// Peer returns peer identifier set using SetPeer. +// Peer returns identifier of the participant of the NeoFS reputation system to +// which the Trust relates. // // Zero Trust returns zero PeerID which is incorrect according to the NeoFS API // protocol. 
-func (x Trust) Peer() (res PeerID) { - m := x.m.GetPeer() - if m != nil { - err := res.ReadFromV2(*m) - if err != nil { - panic(fmt.Sprintf("unexpected error from ReadFromV2: %v", err)) - } +// +// See also [Trust.SetPeer]. +func (x Trust) Peer() PeerID { + if x.peerSet { + return x.peer } - - return + return PeerID{} } // SetValue sets the Trust value. Value MUST be in range [0;1]. // -// See also Value. +// See also [Trust.Value]. func (x *Trust) SetValue(val float64) { if val < 0 || val > 1 { panic(fmt.Sprintf("trust value is out-of-range %v", val)) } - - x.m.SetValue(val) + x.val = val } -// Value returns value set using SetValue. +// Value returns the Trust value. // // Zero Trust has zero value. +// +// See also [Trust.SetValue]. func (x Trust) Value() float64 { - return x.m.GetValue() + return x.val } // PeerToPeerTrust represents trust of one participant of the NeoFS reputation // system to another one. // -// Trust is mutually compatible with github.com/nspcc-dev/neofs-api-go/v2/reputation.PeerToPeerTrust -// message. See ReadFromV2 / WriteToV2 methods. +// Trust is mutually compatible with [reputation.PeerToPeerTrust] message. See +// [PeerToPeerTrust.ReadFromV2] / [PeerToPeerTrust.WriteToV2] methods. // // Instances can be created using built-in var declaration. type PeerToPeerTrust struct { - m reputation.PeerToPeerTrust + trustingPeerSet bool + trustingPeer PeerID + + valSet bool + val Trust } // ReadFromV2 reads PeerToPeerTrust from the reputation.PeerToPeerTrust message. // Returns an error if the message is malformed according to the NeoFS API V2 -// protocol. +// protocol. The message must not be nil. +// +// ReadFromV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. // -// See also WriteToV2. 
-func (x *PeerToPeerTrust) ReadFromV2(m reputation.PeerToPeerTrust) error { - trustingV2 := m.GetTrustingPeer() - if trustingV2 == nil { +// See also [PeerToPeerTrust.WriteToV2]. +func (x *PeerToPeerTrust) ReadFromV2(m *reputation.PeerToPeerTrust) error { + if x.trustingPeerSet = m.TrustingPeer != nil; x.trustingPeerSet { + err := x.trustingPeer.ReadFromV2(m.TrustingPeer) + if err != nil { + return fmt.Errorf("invalid trusting peer: %w", err) + } + } else { return errors.New("missing trusting peer") } - var trusting PeerID - - err := trusting.ReadFromV2(*trustingV2) - if err != nil { - return fmt.Errorf("invalid trusting peer: %w", err) - } - - trustV2 := m.GetTrust() - if trustV2 == nil { + if x.valSet = m.Trust != nil; x.valSet { + err := x.val.ReadFromV2(m.Trust) + if err != nil { + return fmt.Errorf("invalid trust: %w", err) + } + } else { return errors.New("missing trust") } - var trust Trust - - err = trust.ReadFromV2(*trustV2) - if err != nil { - return fmt.Errorf("invalid trust: %w", err) - } - - x.m = m - return nil } -// WriteToV2 writes PeerToPeerTrust to the reputation.PeerToPeerTrust message. -// The message must not be nil. +// WriteToV2 writes PeerToPeerTrust to the reputation.PeerToPeerTrust message of +// the NeoFS API protocol. // -// See also ReadFromV2. +// WriteToV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. +// +// See also [PeerToPeerTrust.ReadFromV2]. func (x PeerToPeerTrust) WriteToV2(m *reputation.PeerToPeerTrust) { - *m = x.m + if x.trustingPeerSet { + m.TrustingPeer = new(reputation.PeerID) + x.trustingPeer.WriteToV2(m.TrustingPeer) + } else { + m.TrustingPeer = nil + } + + if x.valSet { + m.Trust = new(reputation.Trust) + x.val.WriteToV2(m.Trust) + } else { + m.Trust = nil + } } // SetTrustingPeer specifies the peer from which trust comes in terms of the // NeoFS reputation system. // -// See also TrustingPeer. 
+// See also [PeerToPeerTrust.TrustingPeer]. func (x *PeerToPeerTrust) SetTrustingPeer(id PeerID) { - var m reputation.PeerID - id.WriteToV2(&m) - - x.m.SetTrustingPeer(&m) + x.trustingPeer, x.trustingPeerSet = id, true } -// TrustingPeer returns peer set using SetTrustingPeer. +// TrustingPeer returns the peer from which trust comes in terms of the NeoFS +// reputation system. // // Zero PeerToPeerTrust has no trusting peer which is incorrect according // to the NeoFS API protocol. -func (x PeerToPeerTrust) TrustingPeer() (res PeerID) { - m := x.m.GetTrustingPeer() - if m != nil { - err := res.ReadFromV2(*m) - if err != nil { - panic(fmt.Sprintf("unexpected error from PeerID.ReadFromV2: %v", err)) - } +// +// See also [PeerToPeerTrust.SetTrustingPeer]. +func (x PeerToPeerTrust) TrustingPeer() PeerID { + if x.trustingPeerSet { + return x.trustingPeer } - - return + return PeerID{} } // SetTrust sets trust value of the trusting peer to another participant // of the NeoFS reputation system. // -// See also Trust. +// See also [PeerToPeerTrust.Trust]. func (x *PeerToPeerTrust) SetTrust(t Trust) { - var tV2 reputation.Trust - t.WriteToV2(&tV2) - - x.m.SetTrust(&tV2) + x.val, x.valSet = t, true } -// Trust returns trust set using SetTrust. +// Trust returns trust value of the trusting peer to another participant of the +// NeoFS reputation system. // -// Zero PeerToPeerTrust returns zero Trust which is incorect according to the +// Zero PeerToPeerTrust returns zero Trust which is incorrect according to the // NeoFS API protocol. -func (x PeerToPeerTrust) Trust() (res Trust) { - m := x.m.GetTrust() - if m != nil { - err := res.ReadFromV2(*m) - if err != nil { - panic(fmt.Sprintf("unexpected error from Trust.ReadFromV2: %v", err)) - } +// +// See also [PeerToPeerTrust.SetTrust]. 
+func (x PeerToPeerTrust) Trust() Trust { + if x.valSet { + return x.val } - - return + return Trust{} } // GlobalTrust represents the final assessment of trust in the participant of // the NeoFS reputation system obtained taking into account all other participants. // -// GlobalTrust is mutually compatible with github.com/nspcc-dev/neofs-api-go/v2/reputation.GlobalTrust -// message. See ReadFromV2 / WriteToV2 methods. +// GlobalTrust is mutually compatible with [reputation.GlobalTrust] message. See +// [GlobalTrust.ReadFromV2] / [GlobalTrust.WriteToV2] methods. // -// To submit GlobalTrust value in NeoFS zero instance SHOULD be declared, -// initialized using Init method and filled using dedicated methods. +// To submit GlobalTrust value in NeoFS zero instance should be initialized via +// [NewGlobalTrust] and filled using dedicated methods. type GlobalTrust struct { - m reputation.GlobalTrust + versionSet bool + version version.Version + + managerSet bool + manager PeerID + + trustSet bool + trust Trust + + sigSet bool + sig neofscrypto.Signature } -// ReadFromV2 reads GlobalTrust from the reputation.GlobalTrust message. -// Returns an error if the message is malformed according to the NeoFS API V2 -// protocol. -// -// See also WriteToV2. -func (x *GlobalTrust) ReadFromV2(m reputation.GlobalTrust) error { - if m.GetVersion() == nil { +// NewGlobalTrust constructs new GlobalTrust instance. 
+func NewGlobalTrust(manager PeerID, trust Trust) GlobalTrust { + return GlobalTrust{ + versionSet: true, + version: version.Current, + managerSet: true, + manager: manager, + trustSet: true, + trust: trust, + } +} + +func (x *GlobalTrust) readFromV2(m *reputation.GlobalTrust) error { + if x.versionSet = m.Version != nil; x.versionSet { + err := x.version.ReadFromV2(m.Version) + if err != nil { + return fmt.Errorf("invalid version: %w", err) + } + } else { return errors.New("missing version") } - if m.GetSignature() == nil { - return errors.New("missing signature") + if x.sigSet = m.Signature != nil; x.sigSet { + err := x.sig.ReadFromV2(m.Signature) + if err != nil { + return fmt.Errorf("invalid signature: %w", err) + } } - body := m.GetBody() - if body == nil { + if m.Body == nil { return errors.New("missing body") } - managerV2 := body.GetManager() - if managerV2 == nil { + if x.managerSet = m.Body.Manager != nil; x.managerSet { + err := x.manager.ReadFromV2(m.Body.Manager) + if err != nil { + return fmt.Errorf("invalid manager: %w", err) + } + } else { return errors.New("missing manager") } - var manager PeerID - - err := manager.ReadFromV2(*managerV2) - if err != nil { - return fmt.Errorf("invalid manager: %w", err) - } - - trustV2 := body.GetTrust() - if trustV2 == nil { + if x.trustSet = m.Body.Trust != nil; x.trustSet { + err := x.trust.ReadFromV2(m.Body.Trust) + if err != nil { + return fmt.Errorf("invalid trust: %w", err) + } + } else { return errors.New("missing trust") } - var trust Trust - - err = trust.ReadFromV2(*trustV2) - if err != nil { - return fmt.Errorf("invalid trust: %w", err) - } - - x.m = m - return nil } -// WriteToV2 writes GlobalTrust to the reputation.GlobalTrust message. -// The message must not be nil. -// -// See also ReadFromV2. 
-func (x GlobalTrust) WriteToV2(m *reputation.GlobalTrust) { - *m = x.m -} +func (x GlobalTrust) writeToV2(m *reputation.GlobalTrust) { + if x.versionSet { + m.Version = new(refs.Version) + x.version.WriteToV2(m.Version) + } else { + m.Version = nil + } -// Init initializes all internal data of the GlobalTrust required by NeoFS API -// protocol. Init MUST be called when creating a new global trust instance. -// Init SHOULD NOT be called multiple times. Init SHOULD NOT be called if -// the GlobalTrust instance is used for decoding only. -func (x *GlobalTrust) Init() { - var ver refs.Version - version.Current().WriteToV2(&ver) + if x.sigSet { + m.Signature = new(refs.Signature) + x.sig.WriteToV2(m.Signature) + } else { + m.Signature = nil + } - x.m.SetVersion(&ver) + m.Body = x.fillBody() } -func (x *GlobalTrust) setBodyField(setter func(*reputation.GlobalTrustBody)) { - if x != nil { - body := x.m.GetBody() - if body == nil { - body = new(reputation.GlobalTrustBody) - x.m.SetBody(body) - } +func (x GlobalTrust) fillBody() *reputation.GlobalTrust_Body { + if !x.managerSet && !x.trustSet { + return nil + } - setter(body) + var body reputation.GlobalTrust_Body + if x.managerSet { + body.Manager = new(reputation.PeerID) + x.manager.WriteToV2(body.Manager) + } + if x.trustSet { + body.Trust = new(reputation.Trust) + x.trust.WriteToV2(body.Trust) } + + return &body } // SetManager sets identifier of the NeoFS reputation system's participant which // performed trust estimation. // -// See also Manager. +// See also [GlobalTrust.Manager]. func (x *GlobalTrust) SetManager(id PeerID) { - var m reputation.PeerID - id.WriteToV2(&m) - - x.setBodyField(func(body *reputation.GlobalTrustBody) { - body.SetManager(&m) - }) + x.manager, x.managerSet = id, true } -// Manager returns peer set using SetManager. +// Manager returns identifier of the NeoFS reputation system's participant which +// performed trust estimation. 
//
 // Zero GlobalTrust has zero manager which is incorrect according to the
 // NeoFS API protocol.
-func (x GlobalTrust) Manager() (res PeerID) {
-	m := x.m.GetBody().GetManager()
-	if m != nil {
-		err := res.ReadFromV2(*m)
-		if err != nil {
-			panic(fmt.Sprintf("unexpected error from ReadFromV2: %v", err))
-		}
+//
+// See also [GlobalTrust.SetManager].
+func (x GlobalTrust) Manager() PeerID {
+	if x.managerSet {
+		return x.manager
 	}
-
-	return
+	return PeerID{}
 }
 
 // SetTrust sets the global trust score of the network to a specific network
 // member.
 //
-// See also Trust.
+// See also [GlobalTrust.Trust].
 func (x *GlobalTrust) SetTrust(trust Trust) {
-	var m reputation.Trust
-	trust.WriteToV2(&m)
-
-	x.setBodyField(func(body *reputation.GlobalTrustBody) {
-		body.SetTrust(&m)
-	})
+	x.trust, x.trustSet = trust, true
 }
 
-// Trust returns trust set using SetTrust.
+// Trust returns the global trust score of the network to a specific network
+// member.
 //
 // Zero GlobalTrust return zero Trust which is incorrect according to the
 // NeoFS API protocol.
-func (x GlobalTrust) Trust() (res Trust) {
-	m := x.m.GetBody().GetTrust()
-	if m != nil {
-		err := res.ReadFromV2(*m)
-		if err != nil {
-			panic(fmt.Sprintf("unexpected error from ReadFromV2: %v", err))
-		}
+//
+// See also [GlobalTrust.SetTrust].
+func (x GlobalTrust) Trust() Trust {
+	if x.trustSet {
+		return x.trust
 	}
-
-	return
+	return Trust{}
 }
 
 // Sign calculates and writes signature of the [GlobalTrust] data. Returns
@@ -365,59 +387,59 @@ func (x GlobalTrust) Trust() (res Trust) {
 // Note that any [GlobalTrust] mutation is likely to break the signature, so it is
 // expected to be calculated as a final stage of [GlobalTrust] formation.
 //
-// See also [GlobalTrust.VerifySignature], [GlobalTrust.SignedData].
+// See also [GlobalTrust.VerifySignature]. 
func (x *GlobalTrust) Sign(signer neofscrypto.Signer) error { - var sig neofscrypto.Signature - - err := sig.CalculateMarshalled(signer, x.m.GetBody(), nil) + err := x.sig.Calculate(signer, x.signedData()) if err != nil { - return fmt.Errorf("calculate signature: %w", err) + return err } - - var sigv2 refs.Signature - sig.WriteToV2(&sigv2) - - x.m.SetSignature(&sigv2) - + x.sigSet = true return nil } -// SignedData returns actual payload to sign. -// -// See also [GlobalTrust.Sign]. -func (x *GlobalTrust) SignedData() []byte { - return x.m.GetBody().StableMarshal(nil) +func (x *GlobalTrust) signedData() []byte { + body := x.fillBody() + b := make([]byte, body.MarshaledSize()) + body.MarshalStable(b) + return b } // VerifySignature checks if GlobalTrust signature is presented and valid. // // Zero GlobalTrust fails the check. // -// See also Sign. +// See also [GlobalTrust.Sign]. func (x GlobalTrust) VerifySignature() bool { - sigV2 := x.m.GetSignature() - if sigV2 == nil { - return false - } - - var sig neofscrypto.Signature - - return sig.ReadFromV2(*sigV2) == nil && sig.Verify(x.m.GetBody().StableMarshal(nil)) + return x.sigSet && x.sig.Verify(x.signedData()) } -// Marshal encodes GlobalTrust into a binary format of the NeoFS API protocol -// (Protocol Buffers with direct field order). +// Marshal encodes GlobalTrust into a Protocol Buffers V3 binary format. // -// See also Unmarshal. +// See also [GlobalTrust.Unmarshal]. func (x GlobalTrust) Marshal() []byte { - return x.m.StableMarshal(nil) + var m reputation.GlobalTrust + x.writeToV2(&m) + + b, err := proto.Marshal(&m) + if err != nil { + // while it is bad to panic on external package return, we can do nothing better + // for this case: how can a normal message not be encoded? + panic(fmt.Errorf("unexpected marshal protobuf message failure: %w", err)) + } + return b } -// Unmarshal decodes NeoFS API protocol binary format into the GlobalTrust -// (Protocol Buffers with direct field order). 
Returns an error describing -// a format violation. +// Unmarshal decodes Protocol Buffers V3 binary data into the GlobalTrust. +// Returns an error if the message is malformed according to the NeoFS API V2 +// protocol. // -// See also Marshal. +// See also [GlobalTrust.Marshal]. func (x *GlobalTrust) Unmarshal(data []byte) error { - return x.m.Unmarshal(data) + var m reputation.GlobalTrust + err := proto.Unmarshal(data, &m) + if err != nil { + return fmt.Errorf("decode protobuf: %w", err) + } + + return x.readFromV2(&m) } diff --git a/reputation/trust_test.go b/reputation/trust_test.go index ea2e8baeb..70d26ad80 100644 --- a/reputation/trust_test.go +++ b/reputation/trust_test.go @@ -1,213 +1,470 @@ package reputation_test import ( + "math/rand" "testing" - "github.com/nspcc-dev/neofs-api-go/v2/refs" - v2reputation "github.com/nspcc-dev/neofs-api-go/v2/reputation" - "github.com/nspcc-dev/neofs-sdk-go/crypto/test" + apireputation "github.com/nspcc-dev/neofs-sdk-go/api/reputation" "github.com/nspcc-dev/neofs-sdk-go/reputation" reputationtest "github.com/nspcc-dev/neofs-sdk-go/reputation/test" - "github.com/nspcc-dev/neofs-sdk-go/version" + usertest "github.com/nspcc-dev/neofs-sdk-go/user/test" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" ) -func TestTrust_Peer(t *testing.T) { - var trust reputation.Trust +func TestTrustDecoding(t *testing.T) { + t.Run("missing fields", func(t *testing.T) { + for _, testCase := range []struct { + name, err string + corrupt func(trust *apireputation.Trust) + }{ + {name: "peer", err: "missing peer", corrupt: func(tr *apireputation.Trust) { + tr.Peer = nil + }}, + } { + t.Run(testCase.name, func(t *testing.T) { + var src, dst reputation.Trust + var m apireputation.Trust + + // set required data just to not collide with other cases + src.SetPeer(reputationtest.PeerID()) + + src.WriteToV2(&m) + testCase.corrupt(&m) + require.ErrorContains(t, dst.ReadFromV2(&m), testCase.err) + }) + } + }) + t.Run("invalid 
fields", func(t *testing.T) { + for _, testCase := range []struct { + name, err string + corrupt func(trust *apireputation.Trust) + }{ + {name: "value/negative", err: "invalid trust value -0.1", corrupt: func(tr *apireputation.Trust) { + tr.Value = -0.1 + }}, + {name: "value/overflow", err: "invalid trust value 1.1", corrupt: func(tr *apireputation.Trust) { + tr.Value = 1.1 + }}, + {name: "peer/value/nil", err: "invalid peer: missing value field", corrupt: func(tr *apireputation.Trust) { + tr.Peer.PublicKey = nil + }}, + {name: "peer/value/empty", err: "invalid peer: missing value field", corrupt: func(tr *apireputation.Trust) { + tr.Peer.PublicKey = []byte{} + }}, + {name: "peer/value/wrong length", err: "invalid peer: invalid value length 32", corrupt: func(tr *apireputation.Trust) { + tr.Peer.PublicKey = make([]byte, 32) + }}, + } { + t.Run(testCase.name, func(t *testing.T) { + var src, dst reputation.Trust + var m apireputation.Trust + + // set required data just to not collide with other cases + src.SetPeer(reputationtest.PeerID()) + + src.WriteToV2(&m) + testCase.corrupt(&m) + require.ErrorContains(t, dst.ReadFromV2(&m), testCase.err) + }) + } + }) +} - require.Zero(t, trust.Peer()) +func TestTrust_SetPeer(t *testing.T) { + var tr reputation.Trust - trust = reputationtest.Trust() + require.Zero(t, tr.Peer()) peer := reputationtest.PeerID() - - trust.SetPeer(peer) - - var peerV2 v2reputation.PeerID - peer.WriteToV2(&peerV2) - - var trustV2 v2reputation.Trust - trust.WriteToV2(&trustV2) - - require.Equal(t, &peerV2, trustV2.GetPeer()) - - var val2 reputation.Trust - require.NoError(t, val2.ReadFromV2(trustV2)) - - require.Equal(t, peer, val2.Peer()) + tr.SetPeer(peer) + require.Equal(t, peer, tr.Peer()) + + peerOther := reputationtest.ChangePeerID(peer) + tr.SetPeer(peerOther) + require.Equal(t, peerOther, tr.Peer()) + + t.Run("encoding", func(t *testing.T) { + t.Run("api", func(t *testing.T) { + var src, dst reputation.Trust + var msg apireputation.Trust + + 
src.WriteToV2(&msg) + require.Zero(t, msg.Peer) + require.ErrorContains(t, dst.ReadFromV2(&msg), "missing peer") + + dst.SetPeer(peerOther) + src.SetPeer(peer) + src.WriteToV2(&msg) + require.Equal(t, &apireputation.PeerID{PublicKey: peer[:]}, msg.Peer) + require.NoError(t, dst.ReadFromV2(&msg)) + require.Equal(t, peer, dst.Peer()) + }) + }) } -func TestTrust_Value(t *testing.T) { - var val reputation.Trust - - require.Zero(t, val.Value()) - - val = reputationtest.Trust() - - const value = 0.75 - - val.SetValue(value) - - var trustV2 v2reputation.Trust - val.WriteToV2(&trustV2) - - require.EqualValues(t, value, trustV2.GetValue()) - - var val2 reputation.Trust - require.NoError(t, val2.ReadFromV2(trustV2)) - - require.EqualValues(t, value, val2.Value()) +func TestTrust_SetValue(t *testing.T) { + var tr reputation.Trust + + require.Zero(t, tr.Value()) + require.Panics(t, func() { tr.SetValue(-0.1) }) + require.Panics(t, func() { tr.SetValue(1.1) }) + + const val = 0.5 + tr.SetValue(val) + require.EqualValues(t, val, tr.Value()) + + const valOther = val + 0.1 + tr.SetValue(valOther) + require.EqualValues(t, valOther, tr.Value()) + + t.Run("encoding", func(t *testing.T) { + t.Run("api", func(t *testing.T) { + var src, dst reputation.Trust + var msg apireputation.Trust + + // set required data just to satisfy decoder + src.SetPeer(reputationtest.PeerID()) + + dst.SetValue(val) + src.WriteToV2(&msg) + require.Zero(t, msg.Value) + require.NoError(t, dst.ReadFromV2(&msg)) + require.Zero(t, dst.Value()) + + src.SetValue(val) + src.WriteToV2(&msg) + require.EqualValues(t, val, msg.Value) + require.NoError(t, dst.ReadFromV2(&msg)) + require.EqualValues(t, val, dst.Value()) + }) + }) } -func TestPeerToPeerTrust_TrustingPeer(t *testing.T) { - var val reputation.PeerToPeerTrust +func TestPeerToPeerTrustDecoding(t *testing.T) { + t.Run("missing fields", func(t *testing.T) { + for _, testCase := range []struct { + name, err string + corrupt func(trust 
*apireputation.PeerToPeerTrust)
+		}{
+			{name: "trusting peer", err: "missing trusting peer", corrupt: func(tr *apireputation.PeerToPeerTrust) {
+				tr.TrustingPeer = nil
+			}},
+			{name: "value", err: "missing trust", corrupt: func(tr *apireputation.PeerToPeerTrust) {
+				tr.Trust = nil
+			}},
+		} {
+			t.Run(testCase.name, func(t *testing.T) {
+				var src, dst reputation.PeerToPeerTrust
+				var m apireputation.PeerToPeerTrust
+
+				// set required data just to not collide with other cases
+				src.SetTrustingPeer(reputationtest.PeerID())
+				src.SetTrust(reputationtest.Trust())
+
+				src.WriteToV2(&m)
+				testCase.corrupt(&m)
+				require.ErrorContains(t, dst.ReadFromV2(&m), testCase.err)
+			})
+		}
+	})
+	t.Run("invalid fields", func(t *testing.T) {
+		for _, testCase := range []struct {
+			name, err string
+			corrupt   func(trust *apireputation.PeerToPeerTrust)
+		}{
+			{name: "trusting peer/value/nil", err: "invalid trusting peer: missing value field", corrupt: func(tr *apireputation.PeerToPeerTrust) {
+				tr.TrustingPeer.PublicKey = nil
+			}},
+			{name: "trusting peer/value/empty", err: "invalid trusting peer: missing value field", corrupt: func(tr *apireputation.PeerToPeerTrust) {
+				tr.TrustingPeer.PublicKey = []byte{}
+			}},
+			{name: "trusting peer/value/wrong length", err: "invalid trusting peer: invalid value length 32", corrupt: func(tr *apireputation.PeerToPeerTrust) {
+				tr.TrustingPeer.PublicKey = make([]byte, 32)
+			}},
+			{name: "trust/value/negative", err: "invalid trust: invalid trust value -0.1", corrupt: func(tr *apireputation.PeerToPeerTrust) {
+				tr.Trust.Value = -0.1
+			}},
+			{name: "trust/value/overflow", err: "invalid trust: invalid trust value 1.1", corrupt: func(tr *apireputation.PeerToPeerTrust) {
+				tr.Trust.Value = 1.1
+			}},
+			{name: "trust/peer/value/nil", err: "invalid trust: invalid peer: missing value field", corrupt: func(tr *apireputation.PeerToPeerTrust) {
+				tr.Trust.Peer.PublicKey = nil
+			}},
+			{name: "trust/peer/value/empty", err: "invalid trust: invalid peer: missing 
value field", corrupt: func(tr *apireputation.PeerToPeerTrust) { + tr.Trust.Peer.PublicKey = []byte{} + }}, + {name: "trust/peer/value/wrong length", err: "invalid trust: invalid peer: invalid value length 32", corrupt: func(tr *apireputation.PeerToPeerTrust) { + tr.Trust.Peer.PublicKey = make([]byte, 32) + }}, + } { + t.Run(testCase.name, func(t *testing.T) { + var src, dst reputation.PeerToPeerTrust + var m apireputation.PeerToPeerTrust + + // set required data just to not collide with other cases + src.SetTrustingPeer(reputationtest.PeerID()) + src.SetTrust(reputationtest.Trust()) + + src.WriteToV2(&m) + testCase.corrupt(&m) + require.ErrorContains(t, dst.ReadFromV2(&m), testCase.err) + }) + } + }) +} - require.Zero(t, val.TrustingPeer()) +func TestPeerToPeerTrust_SetTrustingPeer(t *testing.T) { + var tr reputation.PeerToPeerTrust - val = reputationtest.PeerToPeerTrust() + require.Zero(t, tr.TrustingPeer()) peer := reputationtest.PeerID() - - val.SetTrustingPeer(peer) - - var peerV2 v2reputation.PeerID - peer.WriteToV2(&peerV2) - - var trustV2 v2reputation.PeerToPeerTrust - val.WriteToV2(&trustV2) - - require.Equal(t, &peerV2, trustV2.GetTrustingPeer()) - - var val2 reputation.PeerToPeerTrust - require.NoError(t, val2.ReadFromV2(trustV2)) - - require.Equal(t, peer, val2.TrustingPeer()) + tr.SetTrustingPeer(peer) + require.EqualValues(t, peer, tr.TrustingPeer()) + + otherPeer := reputationtest.ChangePeerID(peer) + tr.SetTrustingPeer(otherPeer) + require.EqualValues(t, otherPeer, tr.TrustingPeer()) + + t.Run("encoding", func(t *testing.T) { + t.Run("api", func(t *testing.T) { + var src, dst reputation.PeerToPeerTrust + var msg apireputation.PeerToPeerTrust + + // set required data just to satisfy decoder + src.SetTrust(reputationtest.Trust()) + + src.WriteToV2(&msg) + require.Zero(t, msg.TrustingPeer) + require.ErrorContains(t, dst.ReadFromV2(&msg), "missing trusting peer") + + src.SetTrustingPeer(peer) + src.WriteToV2(&msg) + require.Equal(t, 
&apireputation.PeerID{PublicKey: peer[:]}, msg.TrustingPeer) + require.NoError(t, dst.ReadFromV2(&msg)) + require.Equal(t, peer, dst.TrustingPeer()) + }) + }) } -func TestPeerToPeerTrust_Trust(t *testing.T) { - var val reputation.PeerToPeerTrust - - require.Zero(t, val.Trust()) - - val = reputationtest.PeerToPeerTrust() - - trust := reputationtest.Trust() +func TestPeerToPeerTrust_SetTrust(t *testing.T) { + var tr reputation.PeerToPeerTrust - val.SetTrust(trust) + require.Zero(t, tr.Trust()) - var trustV2 v2reputation.Trust - trust.WriteToV2(&trustV2) - - var valV2 v2reputation.PeerToPeerTrust - val.WriteToV2(&valV2) - - require.Equal(t, &trustV2, valV2.GetTrust()) - - var val2 reputation.PeerToPeerTrust - require.NoError(t, val2.ReadFromV2(valV2)) - - require.Equal(t, trust, val2.Trust()) + peer := reputationtest.PeerID() + val := rand.Float64() + var trust reputation.Trust + trust.SetPeer(peer) + trust.SetValue(val) + + tr.SetTrust(trust) + require.EqualValues(t, trust, tr.Trust()) + + otherVal := reputationtest.Trust() + tr.SetTrust(otherVal) + require.EqualValues(t, otherVal, tr.Trust()) + + t.Run("encoding", func(t *testing.T) { + t.Run("api", func(t *testing.T) { + var src, dst reputation.PeerToPeerTrust + var msg apireputation.PeerToPeerTrust + + // set required data just to satisfy decoder + src.SetTrustingPeer(reputationtest.PeerID()) + + src.WriteToV2(&msg) + require.Zero(t, msg.Trust) + require.ErrorContains(t, dst.ReadFromV2(&msg), "missing trust") + + src.SetTrust(trust) + src.WriteToV2(&msg) + require.Equal(t, &apireputation.Trust{ + Peer: &apireputation.PeerID{PublicKey: peer[:]}, + Value: val, + }, msg.Trust) + require.NoError(t, dst.ReadFromV2(&msg)) + require.Equal(t, trust, dst.Trust()) + }) + }) } -func TestGlobalTrust_Init(t *testing.T) { - var val reputation.GlobalTrust - val.Init() - - var valV2 v2reputation.GlobalTrust - val.WriteToV2(&valV2) - - var verV2 refs.Version - version.Current().WriteToV2(&verV2) - - require.Equal(t, &verV2, 
valV2.GetVersion()) +func TestGlobalTrustDecoding(t *testing.T) { + t.Run("invalid binary", func(t *testing.T) { + var tr reputation.GlobalTrust + msg := []byte("definitely_not_protobuf") + err := tr.Unmarshal(msg) + require.ErrorContains(t, err, "decode protobuf") + }) + t.Run("invalid fields", func(t *testing.T) { + for _, testCase := range []struct { + name, err string + corrupt func(trust *apireputation.GlobalTrust) + }{ + {name: "missing version", err: "missing version", corrupt: func(tr *apireputation.GlobalTrust) { + tr.Version = nil + }}, + {name: "missing body", err: "missing body", corrupt: func(tr *apireputation.GlobalTrust) { + tr.Body = nil + }}, + {name: "body/manager/missing", err: "missing manager", corrupt: func(tr *apireputation.GlobalTrust) { + tr.Body.Manager = nil + }}, + {name: "body/manager/value/nil", err: "invalid manager: missing value field", corrupt: func(tr *apireputation.GlobalTrust) { + tr.Body.Manager.PublicKey = nil + }}, + {name: "body/manager/value/empty", err: "invalid manager: missing value field", corrupt: func(tr *apireputation.GlobalTrust) { + tr.Body.Manager.PublicKey = []byte{} + }}, + {name: "body/manager/peer/value/wrong length", err: "invalid manager: invalid value length 32", corrupt: func(tr *apireputation.GlobalTrust) { + tr.Body.Manager.PublicKey = make([]byte, 32) + }}, + {name: "body/trust/peer/value/nil", err: "invalid trust: invalid peer: missing value field", corrupt: func(tr *apireputation.GlobalTrust) { + tr.Body.Trust.Peer.PublicKey = nil + }}, + {name: "body/trust/peer/value/empty", err: "invalid trust: invalid peer: missing value field", corrupt: func(tr *apireputation.GlobalTrust) { + tr.Body.Trust.Peer.PublicKey = []byte{} + }}, + {name: "body/trust/peer/value/wrong length", err: "invalid trust: invalid peer: invalid value length 32", corrupt: func(tr *apireputation.GlobalTrust) { + tr.Body.Trust.Peer.PublicKey = make([]byte, 32) + }}, + {name: "body/trust/value/negative", err: "invalid trust: invalid 
trust value -0.1", corrupt: func(tr *apireputation.GlobalTrust) {
+				tr.Body.Trust.Value = -0.1
+			}},
+			{name: "body/trust/value/overflow", err: "invalid trust: invalid trust value 1.1", corrupt: func(tr *apireputation.GlobalTrust) {
+				tr.Body.Trust.Value = 1.1
+			}},
+		} {
+			t.Run(testCase.name, func(t *testing.T) {
+				src := reputation.NewGlobalTrust(reputationtest.PeerID(), reputationtest.Trust())
+				var dst reputation.GlobalTrust
+				var m apireputation.GlobalTrust
+
+				// set required data just to not collide with other cases
+				src.SetManager(reputationtest.PeerID())
+				src.SetTrust(reputationtest.Trust())
+
+				require.NoError(t, proto.Unmarshal(src.Marshal(), &m))
+				testCase.corrupt(&m)
+				b, err := proto.Marshal(&m)
+				require.NoError(t, err)
+				require.ErrorContains(t, dst.Unmarshal(b), testCase.err)
+			})
+		}
+	})
 }
 
-func TestGlobalTrust_Manager(t *testing.T) {
-	var val reputation.GlobalTrust
-
-	require.Zero(t, val.Manager())
-
-	val = reputationtest.SignedGlobalTrust(t)
-
+func TestNewGlobalTrust(t *testing.T) {
 	peer := reputationtest.PeerID()
-
-	val.SetManager(peer)
-
-	var peerV2 v2reputation.PeerID
-	peer.WriteToV2(&peerV2)
-
-	var trustV2 v2reputation.GlobalTrust
-	val.WriteToV2(&trustV2)
-
-	require.Equal(t, &peerV2, trustV2.GetBody().GetManager())
-
-	var val2 reputation.GlobalTrust
-	require.NoError(t, val2.ReadFromV2(trustV2))
-
-	require.Equal(t, peer, val2.Manager())
+	tr := reputationtest.Trust()
+
+	gt := reputation.NewGlobalTrust(peer, tr)
+	require.False(t, gt.VerifySignature())
+	require.Equal(t, peer, gt.Manager())
+	require.Equal(t, tr, gt.Trust())
+
+	t.Run("encoding", func(t *testing.T) {
+		t.Run("binary", func(t *testing.T) {
+			src := reputation.NewGlobalTrust(peer, tr)
+			dst := reputationtest.GlobalTrust()
+
+			err := dst.Unmarshal(src.Marshal())
+			require.NoError(t, err)
+			require.Equal(t, tr, dst.Trust())
+			require.Equal(t, peer, dst.Manager())
+		})
+	})
 }
 
-func TestGlobalTrust_Trust(t *testing.T) {
-	var val reputation.GlobalTrust
+func 
TestGlobalTrust_Sign(t *testing.T) { + var tr reputation.GlobalTrust - require.Zero(t, val.Trust()) + require.False(t, tr.VerifySignature()) - val = reputationtest.SignedGlobalTrust(t) + usr, otherUsr := usertest.TwoUsers() - trust := reputationtest.Trust() + require.Error(t, tr.Sign(usertest.FailSigner(usr))) + require.False(t, tr.VerifySignature()) + require.Error(t, tr.Sign(usertest.FailSigner(otherUsr))) + require.False(t, tr.VerifySignature()) - val.SetTrust(trust) + require.NoError(t, tr.Sign(usr)) + require.True(t, tr.VerifySignature()) - var trustV2 v2reputation.Trust - trust.WriteToV2(&trustV2) + require.NoError(t, tr.Sign(otherUsr)) + require.True(t, tr.VerifySignature()) - var valV2 v2reputation.GlobalTrust - val.WriteToV2(&valV2) + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + src := reputationtest.GlobalTrustUnsigned() + var dst reputation.GlobalTrust - require.Equal(t, &trustV2, valV2.GetBody().GetTrust()) + require.NoError(t, dst.Sign(usr)) + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.False(t, dst.VerifySignature()) - var val2 reputation.GlobalTrust - require.NoError(t, val2.ReadFromV2(valV2)) + require.NoError(t, dst.Sign(otherUsr)) + require.NoError(t, src.Sign(usr)) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.True(t, dst.VerifySignature()) - require.Equal(t, trust, val2.Trust()) + require.NoError(t, src.Sign(otherUsr)) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.True(t, dst.VerifySignature()) + }) + }) } -func TestGlobalTrust_Sign(t *testing.T) { - val := reputationtest.GlobalTrust() +func TestGlobalTrust_SetManager(t *testing.T) { + var tr reputation.GlobalTrust - require.False(t, val.VerifySignature()) + require.Zero(t, tr.Manager()) - require.NoError(t, val.Sign(test.RandomSigner(t))) - - var valV2 v2reputation.GlobalTrust - val.WriteToV2(&valV2) - - require.NotZero(t, valV2.GetSignature()) - - var val2 reputation.GlobalTrust 
- require.NoError(t, val2.ReadFromV2(valV2)) - - require.True(t, val2.VerifySignature()) + peer := reputationtest.PeerID() + tr.SetManager(peer) + require.Equal(t, peer, tr.Manager()) + + otherPeer := reputationtest.ChangePeerID(peer) + tr.SetManager(otherPeer) + require.Equal(t, otherPeer, tr.Manager()) + + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + src := reputation.NewGlobalTrust(peer, reputationtest.Trust()) + var dst reputation.GlobalTrust + + dst.SetManager(otherPeer) + require.NoError(t, dst.Unmarshal(src.Marshal())) + require.Equal(t, peer, dst.Manager()) + }) + }) } -func TestGlobalTrust_SignedData(t *testing.T) { - val := reputationtest.GlobalTrust() +func TestGlobalTrust_SetTrust(t *testing.T) { + var tr reputation.GlobalTrust - require.False(t, val.VerifySignature()) - signer := test.RandomSigner(t) - - test.SignedDataComponent(t, signer, &val) -} + require.Zero(t, tr.Trust()) -func TestGlobalTrustEncoding(t *testing.T) { - val := reputationtest.SignedGlobalTrust(t) + val := reputationtest.Trust() + tr.SetTrust(val) + require.Equal(t, val, tr.Trust()) - t.Run("binary", func(t *testing.T) { - data := val.Marshal() + otherVal := reputationtest.Trust() + tr.SetTrust(otherVal) + require.Equal(t, otherVal, tr.Trust()) - var val2 reputation.GlobalTrust - require.NoError(t, val2.Unmarshal(data)) + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + src := reputation.NewGlobalTrust(reputationtest.PeerID(), val) + var dst reputation.GlobalTrust - require.Equal(t, val, val2) + dst.SetTrust(otherVal) + require.NoError(t, dst.Unmarshal(src.Marshal())) + require.Equal(t, val, dst.Trust()) + }) }) } diff --git a/session/common.go b/session/common.go index e72a3ff29..ec0ce6058 100644 --- a/session/common.go +++ b/session/common.go @@ -6,8 +6,8 @@ import ( "fmt" "github.com/google/uuid" - "github.com/nspcc-dev/neofs-api-go/v2/refs" - "github.com/nspcc-dev/neofs-api-go/v2/session" + 
"github.com/nspcc-dev/neofs-sdk-go/api/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/session" neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" "github.com/nspcc-dev/neofs-sdk-go/user" ) @@ -25,35 +25,24 @@ type commonData struct { authKey []byte sigSet bool - sig refs.Signature + sig neofscrypto.Signature } -type contextReader func(session.TokenContext, bool) error - func (x commonData) copyTo(dst *commonData) { dst.idSet = x.idSet - copy(dst.id[:], x.id[:]) - + dst.id = x.id dst.issuerSet = x.issuerSet - iss := x.issuer - dst.issuer = iss - + dst.issuer = x.issuer dst.lifetimeSet = x.lifetimeSet dst.iat = x.iat dst.nbf = x.nbf dst.exp = x.exp dst.authKey = bytes.Clone(x.authKey) dst.sigSet = x.sigSet - dst.sig.SetKey(bytes.Clone(x.sig.GetKey())) - dst.sig.SetScheme(x.sig.GetScheme()) - dst.sig.SetSign(bytes.Clone(x.sig.GetSign())) + x.sig.CopyTo(&dst.sig) } -// reads commonData and custom context from the session.Token message. -// If checkFieldPresence is set, returns an error on absence of any protocol-required -// field. Verifies format of any presented field according to NeoFS API V2 protocol. -// Calls contextReader if session context is set. Passes checkFieldPresence into contextReader. 
-func (x *commonData) readFromV2(m session.Token, checkFieldPresence bool, r contextReader) error { +func (x *commonData) readFromV2(m *session.SessionToken, checkFieldPresence bool) error { var err error body := m.GetBody() @@ -61,7 +50,7 @@ func (x *commonData) readFromV2(m session.Token, checkFieldPresence bool, r cont return errors.New("missing token body") } - binID := body.GetID() + binID := body.GetId() if x.idSet = len(binID) > 0; x.idSet { err = x.id.UnmarshalBinary(binID) if err != nil { @@ -73,9 +62,9 @@ func (x *commonData) readFromV2(m session.Token, checkFieldPresence bool, r cont return errors.New("missing session ID") } - issuer := body.GetOwnerID() + issuer := body.GetOwnerId() if x.issuerSet = issuer != nil; x.issuerSet { - err = x.issuer.ReadFromV2(*issuer) + err = x.issuer.ReadFromV2(issuer) if err != nil { return fmt.Errorf("invalid session issuer: %w", err) } @@ -97,140 +86,48 @@ func (x *commonData) readFromV2(m session.Token, checkFieldPresence bool, r cont return errors.New("missing session public key") } - c := body.GetContext() - if c != nil { - err = r(c, checkFieldPresence) - if err != nil { - return fmt.Errorf("invalid context: %w", err) - } - } else if checkFieldPresence { - return errors.New("missing session context") - } - sig := m.GetSignature() if x.sigSet = sig != nil; sig != nil { - x.sig = *sig - } else if checkFieldPresence { - return errors.New("missing body signature") + err = x.sig.ReadFromV2(sig) + if err != nil { + return fmt.Errorf("invalid body signature: %w", err) + } } return nil } -type contextWriter func() session.TokenContext - -func (x commonData) fillBody(w contextWriter) *session.TokenBody { - var body session.TokenBody +func (x commonData) fillBody() *session.SessionToken_Body { + body := session.SessionToken_Body{ + SessionKey: x.authKey, + } if x.idSet { - binID, err := x.id.MarshalBinary() - if err != nil { - panic(fmt.Sprintf("unexpected error from UUID.MarshalBinary: %v", err)) - } - - body.SetID(binID) + 
body.Id = x.id[:] } if x.issuerSet { - var issuer refs.OwnerID - x.issuer.WriteToV2(&issuer) - - body.SetOwnerID(&issuer) + body.OwnerId = new(refs.OwnerID) + x.issuer.WriteToV2(body.OwnerId) } if x.lifetimeSet { - var lifetime session.TokenLifetime - lifetime.SetIat(x.iat) - lifetime.SetNbf(x.nbf) - lifetime.SetExp(x.exp) - - body.SetLifetime(&lifetime) + body.Lifetime = &session.SessionToken_Body_TokenLifetime{ + Exp: x.exp, + Nbf: x.nbf, + Iat: x.iat, + } } - body.SetSessionKey(x.authKey) - - body.SetContext(w()) - return &body } -func (x commonData) writeToV2(m *session.Token, w contextWriter) { - body := x.fillBody(w) - - m.SetBody(body) - - var sig *refs.Signature - +func (x commonData) writeToV2(m *session.SessionToken) { + m.Body = x.fillBody() if x.sigSet { - sig = &x.sig - } - - m.SetSignature(sig) -} - -func (x commonData) signedData(w contextWriter) []byte { - return x.fillBody(w).StableMarshal(nil) -} - -func (x *commonData) sign(signer neofscrypto.Signer, w contextWriter) error { - var sig neofscrypto.Signature - - err := sig.Calculate(signer, x.signedData(w)) - if err != nil { - return err + m.Signature = new(refs.Signature) + x.sig.WriteToV2(m.Signature) } - - sig.WriteToV2(&x.sig) - x.sigSet = true - - return nil -} - -func (x commonData) verifySignature(w contextWriter) bool { - if !x.sigSet { - return false - } - - var sig neofscrypto.Signature - - // TODO: (#233) check owner<->key relation - return sig.ReadFromV2(x.sig) == nil && sig.Verify(x.signedData(w)) -} - -func (x commonData) marshal(w contextWriter) []byte { - var m session.Token - x.writeToV2(&m, w) - - return m.StableMarshal(nil) -} - -func (x *commonData) unmarshal(data []byte, r contextReader) error { - var m session.Token - - err := m.Unmarshal(data) - if err != nil { - return err - } - - return x.readFromV2(m, false, r) -} - -func (x commonData) marshalJSON(w contextWriter) ([]byte, error) { - var m session.Token - x.writeToV2(&m, w) - - return m.MarshalJSON() -} - -func (x 
*commonData) unmarshalJSON(data []byte, r contextReader) error { - var m session.Token - - err := m.UnmarshalJSON(data) - if err != nil { - return err - } - - return x.readFromV2(m, false, r) } // SetExp sets "exp" (expiration time) claim which identifies the expiration @@ -241,12 +138,22 @@ func (x *commonData) unmarshalJSON(data []byte, r contextReader) error { // // Naming is inspired by https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.4. // -// See also ExpiredAt. +// See also ExpiredAt, SetExp. func (x *commonData) SetExp(exp uint64) { x.exp = exp x.lifetimeSet = true } +// Exp returns "exp" claim. +// +// See also SetExp. +func (x commonData) Exp() uint64 { + if x.lifetimeSet { + return x.exp + } + return 0 +} + // SetNbf sets "nbf" (not before) claim which identifies the time (in NeoFS // epochs) before which the session MUST NOT be accepted for processing. // The processing of the "nbf" claim requires that the current date/time MUST be @@ -254,12 +161,22 @@ func (x *commonData) SetExp(exp uint64) { // // Naming is inspired by https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.5. // -// See also InvalidAt. +// See also Nbf, InvalidAt. func (x *commonData) SetNbf(nbf uint64) { x.nbf = nbf x.lifetimeSet = true } +// Nbf returns "nbf" claim. +// +// See also SetNbf. +func (x commonData) Nbf() uint64 { + if x.lifetimeSet { + return x.nbf + } + return 0 +} + // SetIat sets "iat" (issued at) claim which identifies the time (in NeoFS // epochs) at which the session was issued. This claim can be used to // determine the age of the session. @@ -272,6 +189,16 @@ func (x *commonData) SetIat(iat uint64) { x.lifetimeSet = true } +// Iat returns "iat" claim. +// +// See also SetIat. 
+func (x commonData) Iat() uint64 { + if x.lifetimeSet { + return x.iat + } + return 0 +} + func (x commonData) expiredAt(epoch uint64) bool { return !x.lifetimeSet || x.exp < epoch } @@ -354,9 +281,9 @@ func (x commonData) Issuer() user.ID { // IssuerPublicKeyBytes returns binary-encoded public key of the session issuer. // // IssuerPublicKeyBytes MUST NOT be called before ReadFromV2 or Sign methods. -func (x *commonData) IssuerPublicKeyBytes() []byte { +func (x commonData) IssuerPublicKeyBytes() []byte { if x.sigSet { - return x.sig.GetKey() + return x.sig.PublicKeyBytes() } return nil diff --git a/session/common_internal_test.go b/session/common_internal_test.go new file mode 100644 index 000000000..8a540b085 --- /dev/null +++ b/session/common_internal_test.go @@ -0,0 +1,47 @@ +package session + +import ( + "bytes" + "testing" + + oid "github.com/nspcc-dev/neofs-sdk-go/object/id" + oidtest "github.com/nspcc-dev/neofs-sdk-go/object/id/test" + usertest "github.com/nspcc-dev/neofs-sdk-go/user/test" + "github.com/stretchr/testify/require" +) + +func TestCopyTo(t *testing.T) { + usr, _ := usertest.TwoUsers() + + var cnr Container + var obj Object + cnr.SetAuthKey(usr.Public()) + obj.SetAuthKey(usr.Public()) + obj.LimitByObjects(oidtest.NIDs(3)) + + objsCp := make([]oid.ID, len(obj.objs)) + copy(objsCp, obj.objs) + authKeyCp := bytes.Clone(obj.authKey) + + cnrShallow := cnr + objShallow := obj + var cnrDeep Container + cnr.CopyTo(&cnrDeep) + var objDeep Object + obj.CopyTo(&objDeep) + require.Equal(t, cnr, cnrShallow) + require.Equal(t, cnr, cnrDeep) + require.Equal(t, obj, objShallow) + require.Equal(t, obj, objDeep) + + cnr.authKey[0]++ + obj.authKey[0]++ + require.Equal(t, cnr.authKey, cnrShallow.authKey) + require.Equal(t, obj.authKey, objShallow.authKey) + require.Equal(t, authKeyCp, cnrDeep.authKey) + require.Equal(t, authKeyCp, objDeep.authKey) + + obj.objs[1][0]++ + require.Equal(t, obj.objs, objShallow.objs) + require.Equal(t, objsCp, objDeep.objs) +} diff 
--git a/session/common_test.go b/session/common_test.go index 5da0fd6b7..0a0262178 100644 --- a/session/common_test.go +++ b/session/common_test.go @@ -1,234 +1,765 @@ -package session +package session_test import ( "bytes" + "encoding/json" + "math" + "math/rand" "testing" "github.com/google/uuid" - "github.com/nspcc-dev/neofs-api-go/v2/refs" - "github.com/nspcc-dev/neofs-api-go/v2/session" - "github.com/nspcc-dev/neofs-sdk-go/crypto/test" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + apisession "github.com/nspcc-dev/neofs-sdk-go/api/session" + neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" + neofsecdsa "github.com/nspcc-dev/neofs-sdk-go/crypto/ecdsa" + "github.com/nspcc-dev/neofs-sdk-go/user" + usertest "github.com/nspcc-dev/neofs-sdk-go/user/test" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" ) -func Test_commonData_copyTo(t *testing.T) { - var sig refs.Signature - - sig.SetKey([]byte("key")) - sig.SetSign([]byte("sign")) - sig.SetScheme(refs.ECDSA_SHA512) - - signer := test.RandomSignerRFC6979(t) - - data := commonData{ - idSet: true, - id: uuid.New(), - issuerSet: true, - issuer: signer.UserID(), - lifetimeSet: true, - iat: 1, - nbf: 2, - exp: 3, - authKey: []byte{1, 2, 3, 4}, - sigSet: true, - sig: sig, - } - - t.Run("copy", func(t *testing.T) { - var dst commonData - data.copyTo(&dst) - - emptyWriter := func() session.TokenContext { - return &session.ContainerSessionContext{} - } +type token interface { + Marshal() []byte + SignedData() []byte + json.Marshaler + WriteToV2(*apisession.SessionToken) - require.Equal(t, data, dst) - require.True(t, bytes.Equal(data.marshal(emptyWriter), dst.marshal(emptyWriter))) + Issuer() user.ID + IssuerPublicKeyBytes() []byte + ID() uuid.UUID + AssertAuthKey(neofscrypto.PublicKey) bool + InvalidAt(uint64) bool - require.Equal(t, data.issuerSet, dst.issuerSet) - require.Equal(t, data.issuer.String(), dst.issuer.String()) - }) + 
VerifySignature() bool +} - t.Run("change id", func(t *testing.T) { - var dst commonData - data.copyTo(&dst) +type tokenptr interface { + token + Unmarshal([]byte) error + UnmarshalSignedData([]byte) error + json.Unmarshaler + ReadFromV2(*apisession.SessionToken) error + + SetIssuer(user.ID) + SetID(uuid.UUID) + SetAuthKey(neofscrypto.PublicKey) + SetExp(uint64) + SetNbf(uint64) + SetIat(uint64) + + Sign(user.Signer) error + SetSignature(neofscrypto.Signer) error +} - require.Equal(t, data.idSet, dst.idSet) - require.Equal(t, data.id.String(), dst.id.String()) +func setRequiredTokenAPIFields(tok tokenptr) { + usr, _ := usertest.TwoUsers() + tok.SetIssuer(usertest.ID()) + tok.SetID(uuid.New()) + tok.SetAuthKey(usr.Public()) + tok.SetExp(1) +} - dst.SetID(uuid.New()) +type invalidAPITestCase struct { + name, err string + corrupt func(*apisession.SessionToken) +} - require.Equal(t, data.idSet, dst.idSet) - require.NotEqual(t, data.id.String(), dst.id.String()) +func testDecoding[T token, PTR interface { + *T + tokenptr +}](t *testing.T, full func() T, customCases []invalidAPITestCase) { + t.Run("missing fields", func(t *testing.T) { + for _, testCase := range []invalidAPITestCase{ + {name: "body", err: "missing token body", corrupt: func(st *apisession.SessionToken) { + st.Body = nil + }}, + {name: "body/ID/nil", err: "missing session ID", corrupt: func(st *apisession.SessionToken) { + st.Body.Id = nil + }}, + {name: "body/ID/empty", err: "missing session ID", corrupt: func(st *apisession.SessionToken) { + st.Body.Id = []byte{} + }}, + {name: "body/issuer", err: "missing session issuer", corrupt: func(st *apisession.SessionToken) { + st.Body.OwnerId = nil + }}, + {name: "body/lifetime", err: "missing token lifetime", corrupt: func(st *apisession.SessionToken) { + st.Body.Lifetime = nil + }}, + {name: "body/session key/nil", err: "missing session public key", corrupt: func(st *apisession.SessionToken) { + st.Body.SessionKey = nil + }}, + {name: "body/session 
key/empty", err: "missing session public key", corrupt: func(st *apisession.SessionToken) { + st.Body.SessionKey = []byte{} + }}, + {name: "body/context/nil", err: "missing session context", corrupt: func(st *apisession.SessionToken) { + st.Body.Context = nil + }}, + } { + t.Run(testCase.name, func(t *testing.T) { + src := full() + var dst PTR = new(T) + var m apisession.SessionToken + + src.WriteToV2(&m) + testCase.corrupt(&m) + require.ErrorContains(t, dst.ReadFromV2(&m), testCase.err) + + b, err := proto.Marshal(&m) + require.NoError(t, err) + require.NoError(t, dst.Unmarshal(b)) + + j, err := protojson.Marshal(&m) + require.NoError(t, err) + require.NoError(t, dst.UnmarshalJSON(j)) + }) + } }) - - t.Run("overwrite id", func(t *testing.T) { - // id is not set - local := commonData{} - require.False(t, local.idSet) - - // id is set - var dst commonData - dst.SetID(uuid.New()) - require.True(t, dst.idSet) - - // overwrite ID data - local.copyTo(&dst) - - emptyWriter := func() session.TokenContext { - return &session.ContainerSessionContext{} + t.Run("invalid fields", func(t *testing.T) { + for _, testCase := range append(customCases, []invalidAPITestCase{ + {name: "signature/key/nil", err: "invalid body signature: missing public key", corrupt: func(st *apisession.SessionToken) { + st.Signature.Key = nil + }}, + {name: "signature/key/empty", err: "invalid body signature: missing public key", corrupt: func(st *apisession.SessionToken) { + st.Signature.Key = []byte{} + }}, + {name: "signature/signature/nil", err: "invalid body signature: missing signature", corrupt: func(st *apisession.SessionToken) { + st.Signature.Sign = nil + }}, + {name: "signature/signature/empty", err: "invalid body signature: missing signature", corrupt: func(st *apisession.SessionToken) { + st.Signature.Sign = []byte{} + }}, + {name: "signature/unsupported scheme", err: "invalid body signature: unsupported scheme 2147483647", corrupt: func(st *apisession.SessionToken) { + st.Signature.Scheme 
= math.MaxInt32 + }}, + {name: "body/ID/wrong length", err: "invalid session ID: invalid UUID (got 15 bytes)", corrupt: func(st *apisession.SessionToken) { + st.Body.Id = make([]byte, 15) + }}, + {name: "body/ID/wrong prefix", err: "invalid session UUID version 3", corrupt: func(st *apisession.SessionToken) { + st.Body.Id[6] = 3 << 4 + }}, + {name: "body/issuer/value/nil", err: "invalid session issuer: missing value field", corrupt: func(st *apisession.SessionToken) { + st.Body.OwnerId.Value = nil + }}, + {name: "body/issuer/value/empty", err: "invalid session issuer: missing value field", corrupt: func(st *apisession.SessionToken) { + st.Body.OwnerId.Value = []byte{} + }}, + {name: "body/issuer/value/wrong length", err: "invalid session issuer: invalid value length 24", corrupt: func(st *apisession.SessionToken) { + st.Body.OwnerId.Value = make([]byte, 24) + }}, + {name: "body/issuer/value/wrong prefix", err: "invalid session issuer: invalid prefix byte 0x42, expected 0x35", corrupt: func(st *apisession.SessionToken) { + st.Body.OwnerId.Value[0] = 0x42 + }}, + {name: "body/issuer/value/checksum mismatch", err: "invalid session issuer: value checksum mismatch", corrupt: func(st *apisession.SessionToken) { + st.Body.OwnerId.Value[len(st.Body.OwnerId.Value)-1]++ + }}, + }...) 
{ + t.Run(testCase.name, func(t *testing.T) { + src := full() + var dst PTR = new(T) + var m apisession.SessionToken + + src.WriteToV2(&m) + testCase.corrupt(&m) + require.ErrorContains(t, dst.ReadFromV2(&m), testCase.err) + + b, err := proto.Marshal(&m) + require.NoError(t, err) + require.ErrorContains(t, dst.Unmarshal(b), testCase.err) + + j, err := protojson.Marshal(&m) + require.NoError(t, err) + require.ErrorContains(t, dst.UnmarshalJSON(j), testCase.err) + }) } - require.True(t, bytes.Equal(local.marshal(emptyWriter), dst.marshal(emptyWriter))) - - require.False(t, local.idSet) - require.False(t, dst.idSet) - - // update id - dst.SetID(uuid.New()) - - // check that affects only dst - require.False(t, local.idSet) - require.True(t, dst.idSet) }) +} - t.Run("change issuer", func(t *testing.T) { - var dst commonData - data.copyTo(&dst) - - require.Equal(t, data.issuerSet, dst.issuerSet) - require.True(t, data.issuer.Equals(dst.issuer)) - - dst.SetIssuer(test.RandomSignerRFC6979(t).UserID()) +func testCopyTo[T interface { + token + CopyTo(*T) +}](t *testing.T, full T) { + shallow := full + var deep T + full.CopyTo(&deep) + require.Equal(t, full, deep) + require.Equal(t, full, shallow) + + originIssuerKey := bytes.Clone(full.IssuerPublicKeyBytes()) + issuerKey := full.IssuerPublicKeyBytes() + issuerKey[0]++ + require.Equal(t, issuerKey, full.IssuerPublicKeyBytes()) + require.Equal(t, issuerKey, shallow.IssuerPublicKeyBytes()) + require.Equal(t, originIssuerKey, deep.IssuerPublicKeyBytes()) +} - require.Equal(t, data.issuerSet, dst.issuerSet) - require.False(t, data.issuer.Equals(dst.issuer)) +func testSignedData[T token, PTR interface { + *T + tokenptr +}](t *testing.T, unsigned T) { + t.Run("invalid binary", func(t *testing.T) { + var c PTR = new(T) + msg := []byte("definitely_not_protobuf") + err := c.UnmarshalSignedData(msg) + require.ErrorContains(t, err, "decode protobuf") }) - t.Run("overwrite issuer", func(t *testing.T) { - var local commonData - 
require.False(t, local.issuerSet) - - var dst commonData - dst.SetIssuer(test.RandomSignerRFC6979(t).UserID()) - require.True(t, dst.issuerSet) + var x2 PTR = new(T) + var m apisession.SessionToken_Body - local.copyTo(&dst) - require.False(t, local.issuerSet) - require.False(t, dst.issuerSet) + b := unsigned.SignedData() + require.NoError(t, x2.UnmarshalSignedData(b)) + require.Equal(t, unsigned, *x2) - emptyWriter := func() session.TokenContext { - return &session.ContainerSessionContext{} - } - require.True(t, bytes.Equal(local.marshal(emptyWriter), dst.marshal(emptyWriter))) - - require.Equal(t, local.issuerSet, dst.issuerSet) - require.True(t, local.issuer.Equals(dst.issuer)) - - dst.SetIssuer(test.RandomSignerRFC6979(t).UserID()) - require.False(t, local.issuerSet) - require.True(t, dst.issuerSet) + require.NoError(t, proto.Unmarshal(b, &m)) + b, err := proto.Marshal(&m) + require.NoError(t, err) + var x3 PTR = new(T) + require.NoError(t, x3.UnmarshalSignedData(b)) + require.Equal(t, unsigned, *x3) +} - require.False(t, local.issuer.Equals(dst.issuer)) +func testAuthKey[T token, PTR interface { + *T + tokenptr +}](t *testing.T, setRequiredAPIFields func(PTR)) { + var c PTR = new(T) + usr, otherUsr := usertest.TwoUsers() + + require.False(t, c.AssertAuthKey(usr.Public())) + require.False(t, c.AssertAuthKey(otherUsr.Public())) + + c.SetAuthKey(usr.Public()) + require.True(t, c.AssertAuthKey(usr.Public())) + require.False(t, c.AssertAuthKey(otherUsr.Public())) + + c.SetAuthKey(otherUsr.Public()) + require.False(t, c.AssertAuthKey(usr.Public())) + require.True(t, c.AssertAuthKey(otherUsr.Public())) + + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst PTR = new(T), new(T) + + dst.SetAuthKey(usr.Public()) + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.False(t, dst.AssertAuthKey(usr.Public())) + require.False(t, dst.AssertAuthKey(otherUsr.Public())) + + dst.SetAuthKey(otherUsr.Public()) + 
src.SetAuthKey(usr.Public()) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.True(t, dst.AssertAuthKey(usr.Public())) + require.False(t, dst.AssertAuthKey(otherUsr.Public())) + }) + t.Run("api", func(t *testing.T) { + var src, dst PTR = new(T), new(T) + var msg apisession.SessionToken + + // set required data just to satisfy decoder + setRequiredAPIFields(src) + + dst.SetAuthKey(otherUsr.Public()) + src.SetAuthKey(usr.Public()) + src.WriteToV2(&msg) + require.Equal(t, usr.PublicKeyBytes, msg.Body.SessionKey) + require.NoError(t, dst.ReadFromV2(&msg)) + require.True(t, dst.AssertAuthKey(usr.Public())) + require.False(t, dst.AssertAuthKey(otherUsr.Public())) + }) + t.Run("json", func(t *testing.T) { + var src, dst PTR = new(T), new(T) + + dst.SetAuthKey(usr.Public()) + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.False(t, dst.AssertAuthKey(usr.Public())) + require.False(t, dst.AssertAuthKey(otherUsr.Public())) + + dst.SetAuthKey(otherUsr.Public()) + src.SetAuthKey(usr.Public()) + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.True(t, dst.AssertAuthKey(usr.Public())) + require.False(t, dst.AssertAuthKey(otherUsr.Public())) + }) }) +} - t.Run("change lifetime", func(t *testing.T) { - var dst commonData - data.copyTo(&dst) - - require.Equal(t, data.lifetimeSet, dst.lifetimeSet) - require.Equal(t, data.iat, dst.iat) - require.Equal(t, data.nbf, dst.nbf) - require.Equal(t, data.exp, dst.exp) - - dst.SetExp(100) - dst.SetIat(200) - dst.SetNbf(300) - - require.Equal(t, data.lifetimeSet, dst.lifetimeSet) - require.NotEqual(t, data.iat, dst.iat) - require.NotEqual(t, data.nbf, dst.nbf) - require.NotEqual(t, data.exp, dst.exp) +func testSign[T token, PTR interface { + *T + tokenptr +}](t *testing.T, setRequiredAPIFields func(PTR)) { + var c PTR = new(T) + + require.False(t, c.VerifySignature()) + + usr, 
otherUsr := usertest.TwoUsers() + usrSigner := user.NewAutoIDSignerRFC6979(usr.PrivateKey) + otherUsrSigner := user.NewAutoIDSignerRFC6979(otherUsr.PrivateKey) + + require.Error(t, c.Sign(usertest.FailSigner(usr))) + require.False(t, c.VerifySignature()) + require.Error(t, c.Sign(usertest.FailSigner(otherUsr))) + require.False(t, c.VerifySignature()) + + require.NoError(t, c.Sign(usrSigner)) + require.True(t, c.VerifySignature()) + require.Equal(t, usr.ID, c.Issuer()) + require.Equal(t, usr.PublicKeyBytes, c.IssuerPublicKeyBytes()) + + require.NoError(t, c.Sign(otherUsrSigner)) + require.True(t, c.VerifySignature()) + require.Equal(t, otherUsr.ID, c.Issuer()) + require.Equal(t, otherUsr.PublicKeyBytes, c.IssuerPublicKeyBytes()) + + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst PTR = new(T), new(T) + + require.NoError(t, dst.Sign(usrSigner)) + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.False(t, dst.VerifySignature()) + require.Zero(t, dst.Issuer()) + require.Zero(t, dst.IssuerPublicKeyBytes()) + + require.NoError(t, dst.Sign(otherUsrSigner)) + require.NoError(t, src.Sign(usrSigner)) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.True(t, dst.VerifySignature()) + require.Equal(t, usr.ID, dst.Issuer()) + require.Equal(t, usr.PublicKeyBytes, dst.IssuerPublicKeyBytes()) + + require.NoError(t, src.Sign(otherUsrSigner)) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.True(t, dst.VerifySignature()) + require.Equal(t, otherUsr.ID, dst.Issuer()) + require.Equal(t, otherUsr.PublicKeyBytes, dst.IssuerPublicKeyBytes()) + }) + t.Run("api", func(t *testing.T) { + var src, dst PTR = new(T), new(T) + var msg apisession.SessionToken + + // set required data just to satisfy decoder + setRequiredAPIFields(src) + + require.NoError(t, dst.Sign(usrSigner)) + src.WriteToV2(&msg) + require.Zero(t, msg.Signature) + require.NoError(t, dst.ReadFromV2(&msg)) + 
require.False(t, dst.VerifySignature()) + require.Equal(t, src.Issuer(), dst.Issuer()) + require.Zero(t, dst.IssuerPublicKeyBytes()) + + require.NoError(t, dst.Sign(otherUsrSigner)) + require.NoError(t, src.Sign(usrSigner)) + src.WriteToV2(&msg) + require.Equal(t, usr.PublicKeyBytes, msg.Signature.Key) + require.NotEmpty(t, msg.Signature.Sign) + require.EqualValues(t, usrSigner.Scheme(), msg.Signature.Scheme) + require.NoError(t, dst.ReadFromV2(&msg)) + require.True(t, dst.VerifySignature()) + require.Equal(t, usr.ID, dst.Issuer()) + require.Equal(t, usr.PublicKeyBytes, dst.IssuerPublicKeyBytes()) + + require.NoError(t, dst.Sign(usrSigner)) + require.NoError(t, src.Sign(otherUsrSigner)) + src.WriteToV2(&msg) + require.Equal(t, otherUsr.PublicKeyBytes, msg.Signature.Key) + require.NotEmpty(t, msg.Signature.Sign) + require.EqualValues(t, otherUsrSigner.Scheme(), msg.Signature.Scheme) + require.NoError(t, dst.ReadFromV2(&msg)) + require.True(t, dst.VerifySignature()) + require.Equal(t, otherUsr.ID, dst.Issuer()) + require.Equal(t, otherUsr.PublicKeyBytes, dst.IssuerPublicKeyBytes()) + }) + t.Run("json", func(t *testing.T) { + var src, dst PTR = new(T), new(T) + + require.NoError(t, dst.Sign(usrSigner)) + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.False(t, dst.VerifySignature()) + require.Zero(t, dst.Issuer()) + require.Zero(t, dst.IssuerPublicKeyBytes()) + + require.NoError(t, dst.Sign(otherUsrSigner)) + require.NoError(t, src.Sign(usrSigner)) + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.True(t, dst.VerifySignature()) + require.Equal(t, usr.ID, dst.Issuer()) + require.Equal(t, usr.PublicKeyBytes, dst.IssuerPublicKeyBytes()) + + require.NoError(t, dst.Sign(usrSigner)) + require.NoError(t, src.Sign(otherUsrSigner)) + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, 
err) + require.True(t, dst.VerifySignature()) + require.Equal(t, otherUsr.ID, dst.Issuer()) + require.Equal(t, otherUsr.PublicKeyBytes, dst.IssuerPublicKeyBytes()) + }) }) +} - t.Run("overwrite lifetime", func(t *testing.T) { - // lifetime is not set - local := commonData{} - require.False(t, local.lifetimeSet) - - // lifetime is set - var dst commonData - dst.SetExp(100) - dst.SetIat(200) - dst.SetNbf(300) - require.True(t, dst.lifetimeSet) - - local.copyTo(&dst) - require.False(t, local.lifetimeSet) - require.False(t, dst.lifetimeSet) - - emptyWriter := func() session.TokenContext { - return &session.ContainerSessionContext{} - } - require.True(t, bytes.Equal(local.marshal(emptyWriter), dst.marshal(emptyWriter))) - - // check both are equal - require.Equal(t, local.lifetimeSet, dst.lifetimeSet) - require.Equal(t, local.iat, dst.iat) - require.Equal(t, local.nbf, dst.nbf) - require.Equal(t, local.exp, dst.exp) - - // update lifetime - dst.SetExp(100) - dst.SetIat(200) - dst.SetNbf(300) - - // check that affects only dst - require.False(t, local.lifetimeSet) - require.True(t, dst.lifetimeSet) - require.NotEqual(t, local.iat, dst.iat) - require.NotEqual(t, local.nbf, dst.nbf) - require.NotEqual(t, local.exp, dst.exp) +func testSetSignature[T token, PTR interface { + *T + tokenptr +}](t *testing.T, setRequiredAPIFields func(PTR)) { + var c PTR = new(T) + + require.False(t, c.VerifySignature()) + + usr, otherUsr := usertest.TwoUsers() + usrSigner := neofsecdsa.SignerRFC6979(usr.PrivateKey) + otherUsrSigner := neofsecdsa.SignerRFC6979(otherUsr.PrivateKey) + + require.Error(t, c.SetSignature(usertest.FailSigner(usr))) + require.False(t, c.VerifySignature()) + require.Error(t, c.SetSignature(usertest.FailSigner(otherUsr))) + require.False(t, c.VerifySignature()) + + require.NoError(t, c.SetSignature(usrSigner)) + require.True(t, c.VerifySignature()) + require.Zero(t, c.Issuer()) + require.Equal(t, usr.PublicKeyBytes, c.IssuerPublicKeyBytes()) + + require.NoError(t, 
c.SetSignature(otherUsrSigner)) + require.True(t, c.VerifySignature()) + require.Zero(t, c.Issuer()) + require.Equal(t, otherUsr.PublicKeyBytes, c.IssuerPublicKeyBytes()) + + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst PTR = new(T), new(T) + + require.NoError(t, dst.SetSignature(usrSigner)) + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.False(t, dst.VerifySignature()) + require.Zero(t, dst.Issuer()) + require.Zero(t, dst.IssuerPublicKeyBytes()) + + require.NoError(t, dst.SetSignature(otherUsrSigner)) + require.NoError(t, src.SetSignature(usrSigner)) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.True(t, dst.VerifySignature()) + require.Zero(t, dst.Issuer()) + require.Equal(t, usr.PublicKeyBytes, dst.IssuerPublicKeyBytes()) + + require.NoError(t, src.SetSignature(otherUsrSigner)) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.True(t, dst.VerifySignature()) + require.Zero(t, dst.Issuer()) + require.Equal(t, otherUsr.PublicKeyBytes, dst.IssuerPublicKeyBytes()) + }) + t.Run("api", func(t *testing.T) { + var src, dst PTR = new(T), new(T) + var msg apisession.SessionToken + + // set required data just to satisfy decoder + setRequiredAPIFields(src) + + require.NoError(t, dst.SetSignature(usrSigner)) + src.WriteToV2(&msg) + require.Zero(t, msg.Signature) + require.NoError(t, dst.ReadFromV2(&msg)) + require.False(t, dst.VerifySignature()) + require.Equal(t, src.Issuer(), dst.Issuer()) + require.Zero(t, dst.IssuerPublicKeyBytes()) + + require.NoError(t, dst.SetSignature(otherUsrSigner)) + require.NoError(t, src.SetSignature(usrSigner)) + src.WriteToV2(&msg) + require.Equal(t, usr.PublicKeyBytes, msg.Signature.Key) + require.NotEmpty(t, msg.Signature.Sign) + require.EqualValues(t, usrSigner.Scheme(), msg.Signature.Scheme) + require.NoError(t, dst.ReadFromV2(&msg)) + require.True(t, dst.VerifySignature()) + require.Equal(t, src.Issuer(), dst.Issuer()) 
+ require.Equal(t, usr.PublicKeyBytes, dst.IssuerPublicKeyBytes()) + + require.NoError(t, dst.SetSignature(usrSigner)) + require.NoError(t, src.SetSignature(otherUsrSigner)) + src.WriteToV2(&msg) + require.Equal(t, otherUsr.PublicKeyBytes, msg.Signature.Key) + require.NotEmpty(t, msg.Signature.Sign) + require.EqualValues(t, otherUsrSigner.Scheme(), msg.Signature.Scheme) + require.NoError(t, dst.ReadFromV2(&msg)) + require.True(t, dst.VerifySignature()) + require.Equal(t, src.Issuer(), dst.Issuer()) + require.Equal(t, otherUsr.PublicKeyBytes, dst.IssuerPublicKeyBytes()) + }) + t.Run("json", func(t *testing.T) { + var src, dst PTR = new(T), new(T) + + require.NoError(t, dst.SetSignature(usrSigner)) + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.False(t, dst.VerifySignature()) + require.Zero(t, dst.Issuer()) + require.Zero(t, dst.IssuerPublicKeyBytes()) + + require.NoError(t, dst.SetSignature(otherUsrSigner)) + require.NoError(t, src.SetSignature(usrSigner)) + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.True(t, dst.VerifySignature()) + require.Zero(t, dst.Issuer()) + require.Equal(t, usr.PublicKeyBytes, dst.IssuerPublicKeyBytes()) + + require.NoError(t, dst.SetSignature(usrSigner)) + require.NoError(t, src.SetSignature(otherUsrSigner)) + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.True(t, dst.VerifySignature()) + require.Zero(t, dst.Issuer()) + require.Equal(t, otherUsr.PublicKeyBytes, dst.IssuerPublicKeyBytes()) + }) }) +} - t.Run("change sig", func(t *testing.T) { - var dst commonData - data.copyTo(&dst) - - require.Equal(t, data.sigSet, dst.sigSet) - require.Equal(t, data.sig.GetScheme(), dst.sig.GetScheme()) - require.True(t, bytes.Equal(data.sig.GetKey(), dst.sig.GetKey())) - require.True(t, bytes.Equal(data.sig.GetSign(), dst.sig.GetSign())) - 
- dst.sig.SetKey([]byte{1, 2, 3}) - dst.sig.SetScheme(100) - dst.sig.SetSign([]byte{10, 11, 12}) +func testIssuer[T token, PTR interface { + *T + tokenptr +}](t *testing.T, setRequiredAPIFields func(PTR)) { + var c PTR = new(T) + usr, otherUsr := usertest.TwoUsers() + + require.Zero(t, c.Issuer()) + + c.SetIssuer(usr.ID) + require.Equal(t, usr.ID, c.Issuer()) + + c.SetIssuer(otherUsr.ID) + require.Equal(t, otherUsr.ID, c.Issuer()) + + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst PTR = new(T), new(T) + + dst.SetIssuer(usr.ID) + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.Zero(t, dst.Issuer()) + + src.SetIssuer(usr.ID) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.Equal(t, usr.ID, dst.Issuer()) + }) + t.Run("api", func(t *testing.T) { + var src, dst PTR = new(T), new(T) + var msg apisession.SessionToken + + // set required data just to satisfy decoder + setRequiredAPIFields(src) + + src.SetIssuer(usr.ID) + src.WriteToV2(&msg) + require.Equal(t, &refs.OwnerID{Value: usr.ID[:]}, msg.Body.OwnerId) + require.NoError(t, dst.ReadFromV2(&msg)) + require.Equal(t, usr.ID, dst.Issuer()) + }) + t.Run("json", func(t *testing.T) { + var src, dst PTR = new(T), new(T) + + dst.SetIssuer(usr.ID) + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.Zero(t, dst.Issuer()) + + src.SetIssuer(usr.ID) + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.Equal(t, usr.ID, dst.Issuer()) + }) + }) +} - require.Equal(t, data.issuerSet, dst.issuerSet) - require.NotEqual(t, data.sig.GetScheme(), dst.sig.GetScheme()) - require.False(t, bytes.Equal(data.sig.GetKey(), dst.sig.GetKey())) - require.False(t, bytes.Equal(data.sig.GetSign(), dst.sig.GetSign())) +func testID[T token, PTR interface { + *T + tokenptr +}](t *testing.T, setRequiredAPIFields func(PTR)) { + var 
c PTR = new(T) + + require.Zero(t, c.ID()) + + id := uuid.New() + c.SetID(id) + require.Equal(t, id, c.ID()) + + otherID := id + otherID[0]++ + c.SetID(otherID) + require.Equal(t, otherID, c.ID()) + + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst PTR = new(T), new(T) + + dst.SetID(id) + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.Zero(t, dst.ID()) + + src.SetID(id) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.Equal(t, id, dst.ID()) + }) + t.Run("api", func(t *testing.T) { + var src, dst PTR = new(T), new(T) + var msg apisession.SessionToken + + // set required data just to satisfy decoder + setRequiredAPIFields(src) + + src.SetID(id) + src.WriteToV2(&msg) + require.Equal(t, id[:], msg.Body.Id) + require.NoError(t, dst.ReadFromV2(&msg)) + require.Equal(t, id, dst.ID()) + }) + t.Run("json", func(t *testing.T) { + var src, dst PTR = new(T), new(T) + + dst.SetID(id) + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.Zero(t, dst.ID()) + + src.SetID(id) + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.Equal(t, id, dst.ID()) + }) }) +} - t.Run("overwrite sig", func(t *testing.T) { - local := commonData{} - require.False(t, local.sigSet) +func testInvalidAt[T token, PTR interface { + *T + tokenptr +}](t testing.TB, _ T) { + var x PTR = new(T) - emptyWriter := func() session.TokenContext { - return &session.ContainerSessionContext{} - } + nbf := rand.Uint64() + if nbf == math.MaxUint64 { + nbf-- + } - var dst commonData - require.NoError(t, dst.sign(signer, emptyWriter)) - require.True(t, dst.sigSet) + iat := nbf + exp := iat + 1 - local.copyTo(&dst) - require.False(t, local.sigSet) - require.False(t, dst.sigSet) + x.SetNbf(nbf) + x.SetIat(iat) + x.SetExp(exp) - require.True(t, bytes.Equal(local.marshal(emptyWriter), 
dst.marshal(emptyWriter))) + require.True(t, x.InvalidAt(nbf-1)) + require.True(t, x.InvalidAt(iat-1)) + require.False(t, x.InvalidAt(iat)) + require.False(t, x.InvalidAt(exp)) + require.True(t, x.InvalidAt(exp+1)) +} - require.NoError(t, dst.sign(signer, emptyWriter)) - require.False(t, local.sigSet) - require.True(t, dst.sigSet) +func testLifetimeField[T token, PTR interface { + *T + tokenptr +}]( + t *testing.T, + get func(T) uint64, + set func(PTR, uint64), + getAPI func(token *apisession.SessionToken_Body_TokenLifetime) uint64, + setRequiredAPIFields func(PTR), +) { + var c T + + require.Zero(t, get(c)) + + val := rand.Uint64() + set(&c, val) + require.EqualValues(t, val, get(c)) + + otherVal := val * 100 + set(&c, otherVal) + require.EqualValues(t, otherVal, get(c)) + + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst PTR = new(T), new(T) + + set(dst, val) + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.Zero(t, get(*dst)) + + set(src, val) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.EqualValues(t, val, get(*dst)) + }) + t.Run("api", func(t *testing.T) { + var src, dst T + var srcPtr, dstPtr PTR = &src, &dst + var msg apisession.SessionToken + + // set required data just to satisfy decoder + setRequiredAPIFields(srcPtr) + + set(&src, val) + src.WriteToV2(&msg) + require.EqualValues(t, val, getAPI(msg.Body.Lifetime)) + require.NoError(t, dstPtr.ReadFromV2(&msg)) + require.EqualValues(t, val, get(dst)) + }) + t.Run("json", func(t *testing.T) { + var src, dst T + var dstPtr PTR = &dst + + set(&dst, val) + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dstPtr.UnmarshalJSON(j) + require.NoError(t, err) + require.Zero(t, get(dst)) + + set(&src, val) + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dstPtr.UnmarshalJSON(j) + require.NoError(t, err) + require.EqualValues(t, val, get(dst)) + }) }) } diff --git a/session/container.go 
b/session/container.go index 6f920bd06..9538b2e97 100644 --- a/session/container.go +++ b/session/container.go @@ -4,11 +4,14 @@ import ( "errors" "fmt" - "github.com/nspcc-dev/neofs-api-go/v2/refs" - "github.com/nspcc-dev/neofs-api-go/v2/session" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/session" cid "github.com/nspcc-dev/neofs-sdk-go/container/id" neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" + neofsecdsa "github.com/nspcc-dev/neofs-sdk-go/crypto/ecdsa" "github.com/nspcc-dev/neofs-sdk-go/user" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" ) // Container represents token of the NeoFS Container session. A session is opened @@ -17,8 +20,8 @@ import ( // limited validity period, and applies to a strictly defined set of operations. // See methods for details. // -// Container is mutually compatible with github.com/nspcc-dev/neofs-api-go/v2/session.Token -// message. See ReadFromV2 / WriteToV2 methods. +// Container is mutually compatible with [session.SessionToken] message. See +// [Container.ReadFromV2] / [Container.WriteToV2] methods. // // Instances can be created using built-in var declaration. type Container struct { @@ -41,100 +44,136 @@ func (x Container) CopyTo(dst *Container) { dst.cnr = contID } -// readContext is a contextReader needed for commonData methods. 
-func (x *Container) readContext(c session.TokenContext, checkFieldPresence bool) error { - cCnr, ok := c.(*session.ContainerSessionContext) - if !ok || cCnr == nil { - return fmt.Errorf("invalid context %T", c) +func (x *Container) readFromV2(m *session.SessionToken, checkFieldPresence bool) error { + err := x.commonData.readFromV2(m, checkFieldPresence) + if err != nil { + return err + } + + var ctx *session.ContainerSessionContext + if c := m.GetBody().GetContext(); c != nil { + cc, ok := c.(*session.SessionToken_Body_Container) + if !ok { + return errors.New("wrong context field") + } + ctx = cc.Container + } else if checkFieldPresence { + return errors.New("missing session context") + } else { + x.cnrSet = false + x.verb = 0 + return nil } - x.cnrSet = !cCnr.Wildcard() - cnr := cCnr.ContainerID() + x.cnrSet = !ctx.GetWildcard() + cnr := ctx.GetContainerId() if x.cnrSet { if cnr != nil { - err := x.cnr.ReadFromV2(*cnr) + err := x.cnr.ReadFromV2(cnr) if err != nil { - return fmt.Errorf("invalid container ID: %w", err) + return fmt.Errorf("invalid context: invalid container ID: %w", err) } - } else if checkFieldPresence { - return errors.New("missing container or wildcard flag") + } else { + return errors.New("invalid context: missing container or wildcard flag") } } else if cnr != nil { - return errors.New("container conflicts with wildcard flag") + return errors.New("invalid context: container conflicts with wildcard flag") } - x.verb = ContainerVerb(cCnr.Verb()) + x.verb = ContainerVerb(ctx.GetVerb()) return nil } -func (x *Container) readFromV2(m session.Token, checkFieldPresence bool) error { - return x.commonData.readFromV2(m, checkFieldPresence, x.readContext) -} - -// ReadFromV2 reads Container from the session.Token message. Checks if the -// message conforms to NeoFS API V2 protocol. +// ReadFromV2 reads Container from the [session.SessionToken] message. Returns +// an error if the message is malformed according to the NeoFS API V2 protocol. 
+// The message must not be nil. +// +// ReadFromV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. // -// See also WriteToV2. -func (x *Container) ReadFromV2(m session.Token) error { +// See also [Container.WriteToV2]. +func (x *Container) ReadFromV2(m *session.SessionToken) error { return x.readFromV2(m, true) } -func (x Container) writeContext() session.TokenContext { - var c session.ContainerSessionContext - c.SetWildcard(!x.cnrSet) - c.SetVerb(session.ContainerSessionVerb(x.verb)) - +func (x Container) fillContext() *session.SessionToken_Body_Container { + c := session.SessionToken_Body_Container{ + Container: &session.ContainerSessionContext{ + Verb: session.ContainerSessionContext_Verb(x.verb), + Wildcard: !x.cnrSet, + }, + } if x.cnrSet { - var cnr refs.ContainerID - x.cnr.WriteToV2(&cnr) - - c.SetContainerID(&cnr) + c.Container.ContainerId = new(refs.ContainerID) + x.cnr.WriteToV2(c.Container.ContainerId) } - return &c } -// WriteToV2 writes Container to the session.Token message. -// The message must not be nil. +// WriteToV2 writes Container to the [session.SessionToken] message of the NeoFS +// API protocol. +// +// WriteToV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. // -// See also ReadFromV2. -func (x Container) WriteToV2(m *session.Token) { - x.writeToV2(m, x.writeContext) +// See also [Container.ReadFromV2]. +func (x Container) WriteToV2(m *session.SessionToken) { + x.writeToV2(m) + m.Body.Context = x.fillContext() } // Marshal encodes Container into a binary format of the NeoFS API protocol -// (Protocol Buffers with direct field order). +// (Protocol Buffers V3 with direct field order). // -// See also Unmarshal. +// See also [Container.Unmarshal]. 
func (x Container) Marshal() []byte { - return x.marshal(x.writeContext) + var m session.SessionToken + x.WriteToV2(&m) + b := make([]byte, m.MarshaledSize()) + m.MarshalStable(b) + return b } -// Unmarshal decodes NeoFS API protocol binary format into the Container -// (Protocol Buffers with direct field order). Returns an error describing -// a format violation. +// Unmarshal decodes Protocol Buffers V3 binary data into the Container. Returns +// an error describing a format violation of the specified fields. Unmarshal +// does not check presence of the required fields and, at the same time, checks +// format of presented fields. // -// See also Marshal. +// See also [Container.Marshal]. func (x *Container) Unmarshal(data []byte) error { - return x.unmarshal(data, x.readContext) + var m session.SessionToken + err := proto.Unmarshal(data, &m) + if err != nil { + return fmt.Errorf("decode protobuf: %w", err) + } + return x.readFromV2(&m, false) } // MarshalJSON encodes Container into a JSON format of the NeoFS API protocol -// (Protocol Buffers JSON). +// (Protocol Buffers V3 JSON). // -// See also UnmarshalJSON. +// See also [Container.UnmarshalJSON]. func (x Container) MarshalJSON() ([]byte, error) { - return x.marshalJSON(x.writeContext) + var m session.SessionToken + x.WriteToV2(&m) + return protojson.Marshal(&m) } -// UnmarshalJSON decodes NeoFS API protocol JSON format into the Container -// (Protocol Buffers JSON). Returns an error describing a format violation. +// UnmarshalJSON decodes NeoFS API protocol JSON data into the Container +// (Protocol Buffers V3 JSON). Returns an error describing a format violation. +// UnmarshalJSON does not check presence of the required fields and, at the same +// time, checks format of presented fields. // -// See also MarshalJSON. +// See also [Container.MarshalJSON]. 
func (x *Container) UnmarshalJSON(data []byte) error { - return x.unmarshalJSON(data, x.readContext) + var m session.SessionToken + err := protojson.Unmarshal(data, &m) + if err != nil { + return fmt.Errorf("decode protojson: %w", err) + } + return x.readFromV2(&m, false) } // Sign calculates and writes signature of the [Container] data along with @@ -155,43 +194,51 @@ func (x *Container) Sign(signer user.Signer) error { // SetSignature allows to sign Container like [Container.Sign] but without // issuer setting. func (x *Container) SetSignature(signer neofscrypto.Signer) error { - return x.sign(signer, x.writeContext) + err := x.sig.Calculate(signer, x.SignedData()) + if err != nil { + return err + } + x.sigSet = true + return nil } -// SignedData returns actual payload to sign. +// SignedData returns signed data of the Container. // -// Using this method require to set issuer via [Container.SetIssuer] before SignedData call. +// [Container.SetIssuer] must be called before. // // See also [Container.Sign], [Container.UnmarshalSignedData]. -func (x *Container) SignedData() []byte { - return x.signedData(x.writeContext) +func (x Container) SignedData() []byte { + body := x.fillBody() + body.Context = x.fillContext() + b := make([]byte, body.MarshaledSize()) + body.MarshalStable(b) + return b } // UnmarshalSignedData is a reverse op to [Container.SignedData]. func (x *Container) UnmarshalSignedData(data []byte) error { - var body session.TokenBody - err := body.Unmarshal(data) + var body session.SessionToken_Body + err := proto.Unmarshal(data, &body) if err != nil { - return fmt.Errorf("decode body: %w", err) + return fmt.Errorf("decode protobuf: %w", err) } - var tok session.Token - tok.SetBody(&body) - return x.readFromV2(tok, false) + return x.readFromV2(&session.SessionToken{Body: &body}, false) } // VerifySignature checks if Container signature is presented and valid. // // Zero Container fails the check. // -// See also Sign. 
+// See also [Container.Sign], [Container.SetSignature]. func (x Container) VerifySignature() bool { - return x.verifySignature(x.writeContext) + // TODO: (#233) check owner<->key relation + return x.sigSet && x.sig.Verify(x.SignedData()) } // ApplyOnlyTo limits session scope to a given author container. // -// See also AppliedTo. +// See also [Container.AppliedTo]. func (x *Container) ApplyOnlyTo(cnr cid.ID) { x.cnr = cnr x.cnrSet = true @@ -201,13 +248,13 @@ func (x *Container) ApplyOnlyTo(cnr cid.ID) { // // Zero Container is applied to all author's containers. // -// See also ApplyOnlyTo. +// See also [Container.ApplyOnlyTo]. func (x Container) AppliedTo(cnr cid.ID) bool { - return !x.cnrSet || x.cnr.Equals(cnr) + return !x.cnrSet || x.cnr == cnr } // ContainerVerb enumerates container operations. -type ContainerVerb int8 +type ContainerVerb uint8 const ( _ ContainerVerb = iota @@ -220,7 +267,7 @@ const ( // ForVerb specifies the container operation of the session scope. Each // Container is related to the single operation. // -// See also AssertVerb. +// See also [Container.AssertVerb]. func (x *Container) ForVerb(verb ContainerVerb) { x.verb = verb } @@ -229,27 +276,21 @@ func (x *Container) ForVerb(verb ContainerVerb) { // // Zero Container relates to zero (unspecified) verb. // -// See also ForVerb. +// See also [Container.ForVerb]. func (x Container) AssertVerb(verb ContainerVerb) bool { return x.verb == verb } // IssuedBy checks if Container session is issued by the given user. // -// See also Container.Issuer. +// See also [Container.Issuer]. func IssuedBy(cnr Container, id user.ID) bool { - return cnr.Issuer().Equals(id) + return cnr.Issuer() == id } // VerifySessionDataSignature verifies signature of the session data. In practice, // the method is used to authenticate an operation with session data. 
func (x Container) VerifySessionDataSignature(data, signature []byte) bool { - var sigV2 refs.Signature - sigV2.SetKey(x.authKey) - sigV2.SetScheme(refs.ECDSA_RFC6979_SHA256) - sigV2.SetSign(signature) - - var sig neofscrypto.Signature - - return sig.ReadFromV2(sigV2) == nil && sig.Verify(data) + var pubKey neofsecdsa.PublicKeyRFC6979 + return pubKey.Decode(x.authKey) == nil && pubKey.Verify(data, signature) } diff --git a/session/container_internal_test.go b/session/container_internal_test.go deleted file mode 100644 index 3cbfbe80e..000000000 --- a/session/container_internal_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package session - -import ( - "bytes" - "testing" - - "github.com/nspcc-dev/neofs-api-go/v2/session" - cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" - "github.com/stretchr/testify/require" -) - -func TestContainer_CopyTo(t *testing.T) { - var container Container - - containerID := cidtest.ID() - - container.ForVerb(VerbContainerDelete) - container.ApplyOnlyTo(containerID) - - t.Run("copy", func(t *testing.T) { - var dst Container - container.CopyTo(&dst) - - emptyWriter := func() session.TokenContext { - return &session.ContainerSessionContext{} - } - - require.Equal(t, container, dst) - require.True(t, bytes.Equal(container.marshal(emptyWriter), dst.marshal(emptyWriter))) - }) - - t.Run("change", func(t *testing.T) { - var dst Container - container.CopyTo(&dst) - - require.Equal(t, container.verb, dst.verb) - require.True(t, container.cnrSet) - require.True(t, dst.cnrSet) - - container.ForVerb(VerbContainerSetEACL) - - require.NotEqual(t, container.verb, dst.verb) - require.True(t, container.cnrSet) - require.True(t, dst.cnrSet) - }) - - t.Run("overwrite container id", func(t *testing.T) { - var local Container - require.False(t, local.cnrSet) - - var dst Container - dst.ApplyOnlyTo(containerID) - require.True(t, dst.cnrSet) - - local.CopyTo(&dst) - emptyWriter := func() session.TokenContext { - return &session.ContainerSessionContext{} 
- } - - require.Equal(t, local, dst) - require.True(t, bytes.Equal(local.marshal(emptyWriter), dst.marshal(emptyWriter))) - - require.False(t, local.cnrSet) - require.False(t, dst.cnrSet) - - dst.ApplyOnlyTo(containerID) - require.True(t, dst.cnrSet) - require.False(t, local.cnrSet) - }) -} diff --git a/session/container_test.go b/session/container_test.go index 84af8c907..f03a432ec 100644 --- a/session/container_test.go +++ b/session/container_test.go @@ -1,624 +1,320 @@ package session_test import ( - "bytes" - "fmt" - "math" - "math/rand" + mrand "math/rand" "testing" - "github.com/google/uuid" - "github.com/nspcc-dev/neofs-api-go/v2/refs" - v2session "github.com/nspcc-dev/neofs-api-go/v2/session" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + apisession "github.com/nspcc-dev/neofs-sdk-go/api/session" cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" - neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" - "github.com/nspcc-dev/neofs-sdk-go/crypto/test" "github.com/nspcc-dev/neofs-sdk-go/session" sessiontest "github.com/nspcc-dev/neofs-sdk-go/session/test" - "github.com/nspcc-dev/neofs-sdk-go/user" usertest "github.com/nspcc-dev/neofs-sdk-go/user/test" "github.com/stretchr/testify/require" ) -func TestContainerProtocolV2(t *testing.T) { - var validV2 v2session.Token - - var body v2session.TokenBody - validV2.SetBody(&body) - - // ID - id := uuid.New() - binID, err := id.MarshalBinary() - require.NoError(t, err) - restoreID := func() { - body.SetID(binID) - } - restoreID() - - // Owner - usr := usertest.ID(t) - var usrV2 refs.OwnerID - usr.WriteToV2(&usrV2) - restoreUser := func() { - body.SetOwnerID(&usrV2) - } - restoreUser() - - // Lifetime - var lifetime v2session.TokenLifetime - lifetime.SetIat(1) - lifetime.SetNbf(2) - lifetime.SetExp(3) - restoreLifetime := func() { - body.SetLifetime(&lifetime) - } - restoreLifetime() - - // Session key - signer := test.RandomSignerRFC6979(t) - authKey := signer.Public() - binAuthKey := 
neofscrypto.PublicKeyBytes(authKey) - restoreAuthKey := func() { - body.SetSessionKey(binAuthKey) - } - restoreAuthKey() - - // Context - cnr := cidtest.ID() - var cnrV2 refs.ContainerID - cnr.WriteToV2(&cnrV2) - var cCnr v2session.ContainerSessionContext - restoreCtx := func() { - cCnr.SetContainerID(&cnrV2) - cCnr.SetWildcard(false) - body.SetContext(&cCnr) - } - restoreCtx() +var knownContainerVerbs = map[session.ContainerVerb]apisession.ContainerSessionContext_Verb{ + session.VerbContainerPut: apisession.ContainerSessionContext_PUT, + session.VerbContainerDelete: apisession.ContainerSessionContext_DELETE, + session.VerbContainerSetEACL: apisession.ContainerSessionContext_SETEACL, +} - // Signature - var sig refs.Signature - restoreSig := func() { - validV2.SetSignature(&sig) - } - restoreSig() - - // TODO(@cthulhu-rider): #260 use functionality for message corruption - - for _, testcase := range []struct { - name string - corrupt []func() - restore func() - assert func(session.Container) - breakSign func(*v2session.Token) - }{ - { - name: "Signature", - corrupt: []func(){ - func() { - validV2.SetSignature(nil) - }, - }, - restore: restoreSig, - }, - { - name: "ID", - corrupt: []func(){ - func() { - body.SetID([]byte{1, 2, 3}) - }, - func() { - id, err := uuid.NewDCEPerson() - require.NoError(t, err) - bindID, err := id.MarshalBinary() - require.NoError(t, err) - body.SetID(bindID) - }, - }, - restore: restoreID, - assert: func(val session.Container) { - require.Equal(t, id, val.ID()) - }, - breakSign: func(m *v2session.Token) { - id := m.GetBody().GetID() - id[len(id)-1]++ - }, - }, - { - name: "User", - corrupt: []func(){ - func() { - var brokenUsrV2 refs.OwnerID - brokenUsrV2.SetValue(append(usrV2.GetValue(), 1)) - body.SetOwnerID(&brokenUsrV2) - }, - }, - restore: restoreUser, - assert: func(val session.Container) { - require.Equal(t, usr, val.Issuer()) - }, - breakSign: func(m *v2session.Token) { - id := m.GetBody().GetOwnerID().GetValue() - copy(id, 
usertest.ID(t).WalletBytes()) - }, - }, - { - name: "Lifetime", - corrupt: []func(){ - func() { - body.SetLifetime(nil) - }, - }, - restore: restoreLifetime, - assert: func(val session.Container) { - require.True(t, val.InvalidAt(1)) - require.False(t, val.InvalidAt(2)) - require.False(t, val.InvalidAt(3)) - require.True(t, val.InvalidAt(4)) - }, - breakSign: func(m *v2session.Token) { - lt := m.GetBody().GetLifetime() - lt.SetIat(lt.GetIat() + 1) - }, - }, - { - name: "Auth key", - corrupt: []func(){ - func() { - body.SetSessionKey(nil) - }, - func() { - body.SetSessionKey([]byte{}) - }, - }, - restore: restoreAuthKey, - assert: func(val session.Container) { - require.True(t, val.AssertAuthKey(authKey)) - }, - breakSign: func(m *v2session.Token) { - body := m.GetBody() - key := body.GetSessionKey() - cp := bytes.Clone(key) - cp[len(cp)-1]++ - body.SetSessionKey(cp) - }, - }, - { - name: "Context", - corrupt: []func(){ - func() { - body.SetContext(nil) - }, - func() { - cCnr.SetWildcard(true) - }, - func() { - cCnr.SetContainerID(nil) - }, - func() { - var brokenCnr refs.ContainerID - brokenCnr.SetValue(append(cnrV2.GetValue(), 1)) - cCnr.SetContainerID(&brokenCnr) - }, - }, - restore: restoreCtx, - assert: func(val session.Container) { - require.True(t, val.AppliedTo(cnr)) - require.False(t, val.AppliedTo(cidtest.ID())) - }, - breakSign: func(m *v2session.Token) { - cnr := m.GetBody().GetContext().(*v2session.ContainerSessionContext).ContainerID().GetValue() - cnr[len(cnr)-1]++ - }, - }, - } { - var val session.Container - - for i, corrupt := range testcase.corrupt { - corrupt() - require.Error(t, val.ReadFromV2(validV2), testcase.name, fmt.Sprintf("corrupt #%d", i)) - - testcase.restore() - require.NoError(t, val.ReadFromV2(validV2), testcase.name) - - if testcase.assert != nil { - testcase.assert(val) +func setRequiredContainerAPIFields(c *session.Container) { setRequiredTokenAPIFields(c) } + +func TestContainerDecoding(t *testing.T) { + testDecoding(t, 
sessiontest.Container, []invalidAPITestCase{ + {name: "body/context/wrong", err: "wrong context field", corrupt: func(st *apisession.SessionToken) { + st.Body.Context = new(apisession.SessionToken_Body_Object) + }}, + {name: "body/context/wrapped field/nil", err: "invalid context: missing container or wildcard flag", corrupt: func(st *apisession.SessionToken) { + st.Body.Context.(*apisession.SessionToken_Body_Container).Container = nil + }}, + {name: "body/context/wrapped field/empty", err: "invalid context: missing container or wildcard flag", corrupt: func(st *apisession.SessionToken) { + st.Body.Context.(*apisession.SessionToken_Body_Container).Container = new(apisession.ContainerSessionContext) + }}, + {name: "body/context/container conflict", err: "invalid context: container conflicts with wildcard flag", corrupt: func(st *apisession.SessionToken) { + st.Body.Context.(*apisession.SessionToken_Body_Container).Container = &apisession.ContainerSessionContext{ + Wildcard: true, + ContainerId: new(refs.ContainerID), } - - if testcase.breakSign != nil { - require.NoError(t, val.Sign(signer), testcase.name) - require.True(t, val.VerifySignature(), testcase.name) - - var signedV2 v2session.Token - val.WriteToV2(&signedV2) - - var restored session.Container - require.NoError(t, restored.ReadFromV2(signedV2), testcase.name) - require.True(t, restored.VerifySignature(), testcase.name) - - testcase.breakSign(&signedV2) - - require.NoError(t, restored.ReadFromV2(signedV2), testcase.name) - require.False(t, restored.VerifySignature(), testcase.name) + }}, + {name: "body/context/container/value/nil", err: "invalid context: invalid container ID: missing value field", corrupt: func(st *apisession.SessionToken) { + st.Body.Context.(*apisession.SessionToken_Body_Container).Container = &apisession.ContainerSessionContext{ + ContainerId: &refs.ContainerID{Value: nil}, } - } - } -} - -func TestContainer_WriteToV2(t *testing.T) { - var val session.Container - - assert := 
func(baseAssert func(v2session.Token)) { - var m v2session.Token - val.WriteToV2(&m) - baseAssert(m) - } - - // ID - id := uuid.New() - - binID, err := id.MarshalBinary() - require.NoError(t, err) - - val.SetID(id) - assert(func(m v2session.Token) { - require.Equal(t, binID, m.GetBody().GetID()) - }) - - // Owner/Signature - signer := test.RandomSignerRFC6979(t) - - require.NoError(t, val.Sign(signer)) - - usr := signer.UserID() - - var usrV2 refs.OwnerID - usr.WriteToV2(&usrV2) - - assert(func(m v2session.Token) { - require.Equal(t, &usrV2, m.GetBody().GetOwnerID()) - - sig := m.GetSignature() - require.NotZero(t, sig.GetKey()) - require.NotZero(t, sig.GetSign()) - }) - - // Lifetime - const iat, nbf, exp = 1, 2, 3 - val.SetIat(iat) - val.SetNbf(nbf) - val.SetExp(exp) - - assert(func(m v2session.Token) { - lt := m.GetBody().GetLifetime() - require.EqualValues(t, iat, lt.GetIat()) - require.EqualValues(t, nbf, lt.GetNbf()) - require.EqualValues(t, exp, lt.GetExp()) + }}, + {name: "body/context/container/value/empty", err: "invalid context: invalid container ID: missing value field", corrupt: func(st *apisession.SessionToken) { + st.Body.Context.(*apisession.SessionToken_Body_Container).Container = &apisession.ContainerSessionContext{ + ContainerId: &refs.ContainerID{Value: []byte{}}, + } + }}, + {name: "body/context/container/value/wrong length", err: "invalid context: invalid container ID: invalid value length 31", corrupt: func(st *apisession.SessionToken) { + st.Body.Context.(*apisession.SessionToken_Body_Container).Container = &apisession.ContainerSessionContext{ + ContainerId: &refs.ContainerID{Value: make([]byte, 31)}, + } + }}, }) +} - // Context - assert(func(m v2session.Token) { - cCnr, ok := m.GetBody().GetContext().(*v2session.ContainerSessionContext) - require.True(t, ok) - require.True(t, cCnr.Wildcard()) - require.Zero(t, cCnr.ContainerID()) +func TestContainer_Unmarshal(t *testing.T) { + t.Run("invalid binary", func(t *testing.T) { + var c 
session.Container + msg := []byte("definitely_not_protobuf") + err := c.Unmarshal(msg) + require.ErrorContains(t, err, "decode protobuf") }) +} - cnr := cidtest.ID() - - var cnrV2 refs.ContainerID - cnr.WriteToV2(&cnrV2) - - val.ApplyOnlyTo(cnr) - - assert(func(m v2session.Token) { - cCnr, ok := m.GetBody().GetContext().(*v2session.ContainerSessionContext) - require.True(t, ok) - require.False(t, cCnr.Wildcard()) - require.Equal(t, &cnrV2, cCnr.ContainerID()) +func TestContainer_UnmarshalJSON(t *testing.T) { + t.Run("invalid json", func(t *testing.T) { + var c session.Container + msg := []byte("definitely_not_protojson") + err := c.UnmarshalJSON(msg) + require.ErrorContains(t, err, "decode protojson") }) } -func TestContainer_ApplyOnlyTo(t *testing.T) { - var val session.Container - var m v2session.Token - filled := sessiontest.Container() - - assertDefaults := func() { - cCnr, ok := m.GetBody().GetContext().(*v2session.ContainerSessionContext) - require.True(t, ok) - require.True(t, cCnr.Wildcard()) - require.Zero(t, cCnr.ContainerID()) - } - - assertBinary := func(baseAssert func()) { - val2 := filled - - require.NoError(t, val2.Unmarshal(val.Marshal())) - baseAssert() - } - - assertJSON := func(baseAssert func()) { - val2 := filled - - jd, err := val.MarshalJSON() - require.NoError(t, err) - - require.NoError(t, val2.UnmarshalJSON(jd)) - baseAssert() - } - - val.WriteToV2(&m) - - assertDefaults() - assertBinary(assertDefaults) - assertJSON(assertDefaults) - - // set value - - cnr := cidtest.ID() - - var cnrV2 refs.ContainerID - cnr.WriteToV2(&cnrV2) - - val.ApplyOnlyTo(cnr) - - val.WriteToV2(&m) - - assertCnr := func() { - cCnr, ok := m.GetBody().GetContext().(*v2session.ContainerSessionContext) - require.True(t, ok) - require.False(t, cCnr.Wildcard()) - require.Equal(t, &cnrV2, cCnr.ContainerID()) - } - - assertCnr() - assertBinary(assertCnr) - assertJSON(assertCnr) +func TestContainer_CopyTo(t *testing.T) { + testCopyTo(t, sessiontest.Container()) } -func 
TestContainer_AppliedTo(t *testing.T) { - var x session.Container - - cnr1 := cidtest.ID() - cnr2 := cidtest.ID() - - require.True(t, x.AppliedTo(cnr1)) - require.True(t, x.AppliedTo(cnr2)) - - x.ApplyOnlyTo(cnr1) - - require.True(t, x.AppliedTo(cnr1)) - require.False(t, x.AppliedTo(cnr2)) +func TestContainer_UnmarshalSignedData(t *testing.T) { + testSignedData(t, sessiontest.ContainerUnsigned()) } -func TestContainer_InvalidAt(t *testing.T) { - var x session.Container - - nbf := rand.Uint64() - if nbf == math.MaxUint64 { - nbf-- - } - - iat := nbf - exp := iat + 1 - - x.SetNbf(nbf) - x.SetIat(iat) - x.SetExp(exp) - - require.True(t, x.InvalidAt(nbf-1)) - require.True(t, x.InvalidAt(iat-1)) - require.False(t, x.InvalidAt(iat)) - require.False(t, x.InvalidAt(exp)) - require.True(t, x.InvalidAt(exp+1)) +func TestContainer_SetAuthKey(t *testing.T) { + testAuthKey(t, setRequiredContainerAPIFields) } -func TestContainer_ID(t *testing.T) { - var x session.Container - - require.Zero(t, x.ID()) - - id := uuid.New() - - x.SetID(id) - - require.Equal(t, id, x.ID()) +func TestContainer_Sign(t *testing.T) { + testSign(t, setRequiredContainerAPIFields) } -func TestContainer_AssertAuthKey(t *testing.T) { - var x session.Container - - key := test.RandomSignerRFC6979(t).Public() - - require.False(t, x.AssertAuthKey(key)) - - x.SetAuthKey(key) - - require.True(t, x.AssertAuthKey(key)) +func TestContainer_SetSignature(t *testing.T) { + testSetSignature(t, setRequiredContainerAPIFields) } -func TestContainer_ForVerb(t *testing.T) { - var val session.Container - var m v2session.Token - filled := sessiontest.Container() - - assertDefaults := func() { - cCnr, ok := m.GetBody().GetContext().(*v2session.ContainerSessionContext) - require.True(t, ok) - require.Zero(t, cCnr.Verb()) - } - - assertBinary := func(baseAssert func()) { - val2 := filled - - require.NoError(t, val2.Unmarshal(val.Marshal())) - baseAssert() - } - - assertJSON := func(baseAssert func()) { - val2 := filled - - jd, err 
:= val.MarshalJSON() - require.NoError(t, err) - - require.NoError(t, val2.UnmarshalJSON(jd)) - baseAssert() - } - - val.WriteToV2(&m) - - assertDefaults() - assertBinary(assertDefaults) - assertJSON(assertDefaults) - - // set value - - assertVerb := func(verb v2session.ContainerSessionVerb) { - cCnr, ok := m.GetBody().GetContext().(*v2session.ContainerSessionContext) - require.True(t, ok) - require.Equal(t, verb, cCnr.Verb()) - } - - for from, to := range map[session.ContainerVerb]v2session.ContainerSessionVerb{ - session.VerbContainerPut: v2session.ContainerVerbPut, - session.VerbContainerDelete: v2session.ContainerVerbDelete, - session.VerbContainerSetEACL: v2session.ContainerVerbSetEACL, - } { - val.ForVerb(from) - - val.WriteToV2(&m) +func TestContainer_SetIssuer(t *testing.T) { + testIssuer(t, setRequiredContainerAPIFields) +} - assertVerb(to) - assertBinary(func() { assertVerb(to) }) - assertJSON(func() { assertVerb(to) }) - } +func TestContainer_SetID(t *testing.T) { + testID(t, setRequiredContainerAPIFields) } -func TestContainer_AssertVerb(t *testing.T) { - var x session.Container +func TestContainer_InvalidAt(t *testing.T) { + testInvalidAt(t, session.Container{}) +} - const v1, v2 = session.VerbContainerPut, session.VerbContainerDelete +func TestContainer_SetExp(t *testing.T) { + testLifetimeField(t, session.Container.Exp, (*session.Container).SetExp, (*apisession.SessionToken_Body_TokenLifetime).GetExp, setRequiredContainerAPIFields) +} - require.False(t, x.AssertVerb(v1)) - require.False(t, x.AssertVerb(v2)) +func TestContainer_SetNbf(t *testing.T) { + testLifetimeField(t, session.Container.Nbf, (*session.Container).SetNbf, (*apisession.SessionToken_Body_TokenLifetime).GetNbf, setRequiredContainerAPIFields) +} - x.ForVerb(v1) - require.True(t, x.AssertVerb(v1)) - require.False(t, x.AssertVerb(v2)) +func TestContainer_SetIat(t *testing.T) { + testLifetimeField(t, session.Container.Iat, (*session.Container).SetIat, 
(*apisession.SessionToken_Body_TokenLifetime).GetIat, setRequiredContainerAPIFields) } func TestIssuedBy(t *testing.T) { - var ( - token session.Container - issuer user.ID - signer = test.RandomSignerRFC6979(t) - ) - - issuer = signer.UserID() + var c session.Container - require.False(t, session.IssuedBy(token, issuer)) + usr, otherUsr := usertest.TwoUsers() + c.SetIssuer(usr.ID) + require.True(t, session.IssuedBy(c, usr.ID)) + require.False(t, session.IssuedBy(c, otherUsr.ID)) - require.NoError(t, token.Sign(signer)) - require.True(t, session.IssuedBy(token, issuer)) + require.NoError(t, c.Sign(otherUsr)) + require.False(t, session.IssuedBy(c, usr.ID)) + require.True(t, session.IssuedBy(c, otherUsr.ID)) } -func TestContainer_Issuer(t *testing.T) { - t.Run("signer", func(t *testing.T) { - var token session.Container - - signer := test.RandomSignerRFC6979(t) +func TestContainer_VerifySessionDataSignature(t *testing.T) { + var c session.Container + usr, otherUsr := usertest.TwoUsers() + someData := []byte("Hello, world!") - require.Zero(t, token.Issuer()) - require.NoError(t, token.Sign(signer)) - - issuer := signer.UserID() - require.True(t, token.Issuer().Equals(issuer)) - }) - - t.Run("external", func(t *testing.T) { - var token session.Container - - signer := test.RandomSignerRFC6979(t) - issuer := signer.UserID() - - token.SetIssuer(issuer) - require.True(t, token.Issuer().Equals(issuer)) - }) - - t.Run("public key", func(t *testing.T) { - var token session.Container + usrSig, err := usr.SignerRFC6979.Sign(someData) + require.NoError(t, err) + otherUsrSig, err := otherUsr.SignerRFC6979.Sign(someData) + require.NoError(t, err) - signer := test.RandomSignerRFC6979(t) + require.False(t, c.VerifySessionDataSignature(someData, usrSig)) + require.False(t, c.VerifySessionDataSignature(someData, otherUsrSig)) - require.Nil(t, token.IssuerPublicKeyBytes()) - require.NoError(t, token.Sign(signer)) + c.SetAuthKey(usr.Public()) + require.True(t, 
c.VerifySessionDataSignature(someData, usrSig)) + require.False(t, c.VerifySessionDataSignature(someData, otherUsrSig)) - require.Equal(t, neofscrypto.PublicKeyBytes(signer.Public()), token.IssuerPublicKeyBytes()) - }) + c.SetAuthKey(otherUsr.Public()) + require.False(t, c.VerifySessionDataSignature(someData, usrSig)) + require.True(t, c.VerifySessionDataSignature(someData, otherUsrSig)) } -func TestContainer_Sign(t *testing.T) { - val := sessiontest.Container() - - require.NoError(t, val.SetSignature(test.RandomSignerRFC6979(t))) - require.Zero(t, val.Issuer()) - require.True(t, val.VerifySignature()) - - require.NoError(t, val.Sign(test.RandomSignerRFC6979(t))) - - require.True(t, val.VerifySignature()) - - t.Run("issue#546", func(t *testing.T) { - signer1 := test.RandomSignerRFC6979(t) - signer2 := test.RandomSignerRFC6979(t) - require.False(t, signer1.UserID().Equals(signer2.UserID())) - - token1 := sessiontest.Container() - require.NoError(t, token1.Sign(signer1)) - require.Equal(t, signer1.UserID(), token1.Issuer()) +func TestContainer_ApplyOnlyTo(t *testing.T) { + var c session.Container - // copy token and re-sign - var token2 session.Container - token1.CopyTo(&token2) - require.NoError(t, token2.Sign(signer2)) - require.Equal(t, signer2.UserID(), token2.Issuer()) + cnr := cidtest.ID() + cnrOther := cidtest.ChangeID(cnr) + require.True(t, c.AppliedTo(cnr)) + require.True(t, c.AppliedTo(cnrOther)) + + c.ApplyOnlyTo(cnr) + require.True(t, c.AppliedTo(cnr)) + require.False(t, c.AppliedTo(cnrOther)) + + c.ApplyOnlyTo(cnrOther) + require.False(t, c.AppliedTo(cnr)) + require.True(t, c.AppliedTo(cnrOther)) + + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst session.Container + + dst.ApplyOnlyTo(cnr) + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.True(t, dst.AppliedTo(cnr)) + require.True(t, dst.AppliedTo(cnrOther)) + + src.ApplyOnlyTo(cnr) + err = dst.Unmarshal(src.Marshal()) + 
require.NoError(t, err) + require.True(t, dst.AppliedTo(cnr)) + require.False(t, dst.AppliedTo(cnrOther)) + }) + t.Run("api", func(t *testing.T) { + var src, dst session.Container + var msg apisession.SessionToken + + // set required data just to satisfy decoder + setRequiredContainerAPIFields(&src) + + dst.ApplyOnlyTo(cnr) + src.WriteToV2(&msg) + require.True(t, msg.Body.Context.(*apisession.SessionToken_Body_Container).Container.Wildcard) + require.Nil(t, msg.Body.Context.(*apisession.SessionToken_Body_Container).Container.ContainerId) + require.NoError(t, dst.ReadFromV2(&msg)) + require.True(t, dst.AppliedTo(cnr)) + require.True(t, dst.AppliedTo(cnrOther)) + + src.ApplyOnlyTo(cnr) + src.WriteToV2(&msg) + require.False(t, msg.Body.Context.(*apisession.SessionToken_Body_Container).Container.Wildcard) + require.Equal(t, &refs.ContainerID{Value: cnr[:]}, msg.Body.Context.(*apisession.SessionToken_Body_Container).Container.ContainerId) + require.NoError(t, dst.ReadFromV2(&msg)) + require.True(t, dst.AppliedTo(cnr)) + require.False(t, dst.AppliedTo(cnrOther)) + }) + t.Run("json", func(t *testing.T) { + var src, dst session.Container + + dst.ApplyOnlyTo(cnr) + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.True(t, dst.AppliedTo(cnr)) + require.True(t, dst.AppliedTo(cnrOther)) + + src.ApplyOnlyTo(cnr) + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.True(t, dst.AppliedTo(cnr)) + require.False(t, dst.AppliedTo(cnrOther)) + }) }) } -func TestContainer_SignedData(t *testing.T) { - id := usertest.ID(t) - - val := sessiontest.Container() - val.SetIssuer(id) - - signedData := val.SignedData() - var dec session.Container - require.NoError(t, dec.UnmarshalSignedData(signedData)) - require.Equal(t, val, dec) - - signer := user.NewSigner(test.RandomSigner(t), id) - test.SignedDataComponentUser(t, signer, &val) -} - -func 
TestContainer_VerifyDataSignature(t *testing.T) { - signer := test.RandomSignerRFC6979(t) - - var tok session.Container - - data := make([]byte, 100) - //nolint:staticcheck - rand.Read(data) - - var sig neofscrypto.Signature - require.NoError(t, sig.Calculate(signer, data)) - - var sigV2 refs.Signature - sig.WriteToV2(&sigV2) +func TestContainer_ForVerb(t *testing.T) { + var c session.Container - require.False(t, tok.VerifySessionDataSignature(data, sigV2.GetSign())) + for verb := range knownContainerVerbs { + require.False(t, c.AssertVerb(verb)) + c.ForVerb(verb) + require.True(t, c.AssertVerb(verb)) + } - tok.SetAuthKey(signer.Public()) - require.True(t, tok.VerifySessionDataSignature(data, sigV2.GetSign())) - require.False(t, tok.VerifySessionDataSignature(append(data, 1), sigV2.GetSign())) - require.False(t, tok.VerifySessionDataSignature(data, append(sigV2.GetSign(), 1))) + verb := session.ContainerVerb(mrand.Uint32() % 256) + verbOther := verb + 1 + c.ForVerb(verb) + require.True(t, c.AssertVerb(verb)) + require.False(t, c.AssertVerb(verbOther)) + + c.ForVerb(verbOther) + require.False(t, c.AssertVerb(verb)) + require.True(t, c.AssertVerb(verbOther)) + + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst session.Container + + dst.ForVerb(verb) + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.False(t, dst.AssertVerb(verb)) + require.False(t, dst.AssertVerb(verbOther)) + + dst.ForVerb(verbOther) + src.ForVerb(verb) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.True(t, dst.AssertVerb(verb)) + require.False(t, dst.AssertVerb(verbOther)) + }) + t.Run("api", func(t *testing.T) { + var src, dst session.Container + var msg apisession.SessionToken + + // set required data just to satisfy decoder + setRequiredContainerAPIFields(&src) + + dst.ForVerb(verb) + src.WriteToV2(&msg) + require.Zero(t, msg.Body.Context.(*apisession.SessionToken_Body_Container).Container.Verb) + 
require.NoError(t, dst.ReadFromV2(&msg)) + require.False(t, dst.AssertVerb(verb)) + require.False(t, dst.AssertVerb(verbOther)) + + dst.ForVerb(verbOther) + src.ForVerb(verb) + src.WriteToV2(&msg) + require.EqualValues(t, verb, msg.Body.Context.(*apisession.SessionToken_Body_Container).Container.Verb) + require.NoError(t, dst.ReadFromV2(&msg)) + require.True(t, dst.AssertVerb(verb)) + require.False(t, dst.AssertVerb(verbOther)) + + for verb, apiVerb := range knownContainerVerbs { + src.ForVerb(verb) + src.WriteToV2(&msg) + require.EqualValues(t, apiVerb, msg.Body.Context.(*apisession.SessionToken_Body_Container).Container.Verb) + require.NoError(t, dst.ReadFromV2(&msg)) + require.True(t, dst.AssertVerb(verb)) + } + }) + t.Run("json", func(t *testing.T) { + var src, dst session.Container + + dst.ForVerb(verb) + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.False(t, dst.AssertVerb(verb)) + require.False(t, dst.AssertVerb(verbOther)) + + dst.ForVerb(verbOther) + src.ForVerb(verb) + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.True(t, dst.AssertVerb(verb)) + require.False(t, dst.AssertVerb(verbOther)) + }) + }) } diff --git a/session/example_test.go b/session/example_test.go index 14c39db18..43f33a9d4 100644 --- a/session/example_test.go +++ b/session/example_test.go @@ -1,7 +1,7 @@ package session_test import ( - apiGoSession "github.com/nspcc-dev/neofs-api-go/v2/session" + apisession "github.com/nspcc-dev/neofs-sdk-go/api/session" cid "github.com/nspcc-dev/neofs-sdk-go/container/id" neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" "github.com/nspcc-dev/neofs-sdk-go/session" @@ -12,10 +12,6 @@ import ( // will be authenticated by a trusted person. The principal confirms his trust by // signing the public part of the secret (public session key). 
func ExampleContainer() { - // import neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" - // import "github.com/nspcc-dev/neofs-sdk-go/user" - // import cid "github.com/nspcc-dev/neofs-sdk-go/container/id" - // you private key/signer, to prove you are you var principalSigner user.Signer // trusted party, who can do action on behalf of you. Usually the key maybe taken from Client.SessionCreate. @@ -35,17 +31,15 @@ func ExampleContainer() { // Instances can be also used to process NeoFS API V2 protocol messages with [https://github.com/nspcc-dev/neofs-api] package. func ExampleObject_marshalling() { - // import apiGoSession "github.com/nspcc-dev/neofs-api-go/v2/session" - // On the client side. var tok session.Object - var msg apiGoSession.Token + var msg apisession.SessionToken tok.WriteToV2(&msg) // *send message* // On the server side. - _ = tok.ReadFromV2(msg) + _ = tok.ReadFromV2(&msg) } diff --git a/session/object.go b/session/object.go index f974d25d2..64e3f55f4 100644 --- a/session/object.go +++ b/session/object.go @@ -4,12 +4,14 @@ import ( "errors" "fmt" - "github.com/nspcc-dev/neofs-api-go/v2/refs" - "github.com/nspcc-dev/neofs-api-go/v2/session" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/session" cid "github.com/nspcc-dev/neofs-sdk-go/container/id" neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" oid "github.com/nspcc-dev/neofs-sdk-go/object/id" "github.com/nspcc-dev/neofs-sdk-go/user" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" ) // Object represents token of the NeoFS Object session. A session is opened @@ -18,8 +20,8 @@ import ( // limited validity period, and applies to a strictly defined set of operations. // See methods for details. // -// Object is mutually compatible with github.com/nspcc-dev/neofs-api-go/v2/session.Token -// message. See ReadFromV2 / WriteToV2 methods. +// Object is mutually compatible with [session.Token] message. 
See +// [Object.ReadFromV2] / [Object.WriteToV2] methods. // // Instances can be created using built-in var declaration. type Object struct { @@ -51,125 +53,156 @@ func (x Object) CopyTo(dst *Object) { } } -func (x *Object) readContext(c session.TokenContext, checkFieldPresence bool) error { - cObj, ok := c.(*session.ObjectSessionContext) - if !ok || cObj == nil { - return fmt.Errorf("invalid context %T", c) +func (x *Object) readFromV2(m *session.SessionToken, checkFieldPresence bool) error { + err := x.commonData.readFromV2(m, checkFieldPresence) + if err != nil { + return err } - var err error + var ctx *session.ObjectSessionContext + if c := m.GetBody().GetContext(); c != nil { + cc, ok := c.(*session.SessionToken_Body_Object) + if !ok { + return errors.New("wrong context field") + } + ctx = cc.Object + } else if checkFieldPresence { + return errors.New("missing session context") + } else { + x.cnrSet = false + x.verb = 0 + x.objs = nil + return nil + } - cnr := cObj.GetContainer() + cnr := ctx.GetTarget().GetContainer() if x.cnrSet = cnr != nil; x.cnrSet { - err := x.cnr.ReadFromV2(*cnr) + err := x.cnr.ReadFromV2(cnr) if err != nil { - return fmt.Errorf("invalid container ID: %w", err) + return fmt.Errorf("invalid context: invalid target container: %w", err) } } else if checkFieldPresence { - return errors.New("missing target container") + return errors.New("invalid context: missing target container") } - objs := cObj.GetObjects() + objs := ctx.GetTarget().GetObjects() if objs != nil { x.objs = make([]oid.ID, len(objs)) for i := range objs { err = x.objs[i].ReadFromV2(objs[i]) if err != nil { - return fmt.Errorf("invalid target object: %w", err) + return fmt.Errorf("invalid context: invalid target object #%d: %w", i, err) } } } else { x.objs = nil } - x.verb = ObjectVerb(cObj.GetVerb()) + x.verb = ObjectVerb(ctx.GetVerb()) return nil } -func (x *Object) readFromV2(m session.Token, checkFieldPresence bool) error { - return x.commonData.readFromV2(m, 
checkFieldPresence, x.readContext) -} - -// ReadFromV2 reads Object from the session.Token message. Checks if the -// message conforms to NeoFS API V2 protocol. +// ReadFromV2 reads Object from the [session.SessionToken] message. Returns an +// error if the message is malformed according to the NeoFS API V2 protocol. The +// message must not be nil. // -// See also WriteToV2. -func (x *Object) ReadFromV2(m session.Token) error { +// ReadFromV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. +// +// See also [Object.WriteToV2]. +func (x *Object) ReadFromV2(m *session.SessionToken) error { return x.readFromV2(m, true) } -func (x Object) writeContext() session.TokenContext { - var c session.ObjectSessionContext - c.SetVerb(session.ObjectSessionVerb(x.verb)) - - if x.cnrSet || len(x.objs) > 0 { - var cnr *refs.ContainerID - - if x.cnrSet { - cnr = new(refs.ContainerID) - x.cnr.WriteToV2(cnr) +func (x Object) fillContext() *session.SessionToken_Body_Object { + c := session.SessionToken_Body_Object{ + Object: &session.ObjectSessionContext{ + Verb: session.ObjectSessionContext_Verb(x.verb), + }, + } + if x.cnrSet { + c.Object.Target = &session.ObjectSessionContext_Target{ + Container: new(refs.ContainerID), } - - var objs []refs.ObjectID - - if x.objs != nil { - objs = make([]refs.ObjectID, len(x.objs)) - - for i := range x.objs { - x.objs[i].WriteToV2(&objs[i]) - } + x.cnr.WriteToV2(c.Object.Target.Container) + } + if x.objs != nil { + if c.Object.Target == nil { + c.Object.Target = new(session.ObjectSessionContext_Target) + } + c.Object.Target.Objects = make([]*refs.ObjectID, len(x.objs)) + for i := range x.objs { + c.Object.Target.Objects[i] = new(refs.ObjectID) + x.objs[i].WriteToV2(c.Object.Target.Objects[i]) } - - c.SetTarget(cnr, objs...) } - return &c } -// WriteToV2 writes Object to the session.Token message. -// The message must not be nil. 
+// WriteToV2 writes Object to the [session.SessionToken] message of the NeoFS +// API protocol. // -// See also ReadFromV2. -func (x Object) WriteToV2(m *session.Token) { - x.writeToV2(m, x.writeContext) +// WriteToV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. +// +// See also [Object.ReadFromV2]. +func (x Object) WriteToV2(m *session.SessionToken) { + x.writeToV2(m) + m.Body.Context = x.fillContext() } // Marshal encodes Object into a binary format of the NeoFS API protocol -// (Protocol Buffers with direct field order). +// (Protocol Buffers V3 with direct field order). // -// See also Unmarshal. +// See also [Object.Unmarshal]. func (x Object) Marshal() []byte { - var m session.Token + var m session.SessionToken x.WriteToV2(&m) - - return x.marshal(x.writeContext) + b := make([]byte, m.MarshaledSize()) + m.MarshalStable(b) + return b } -// Unmarshal decodes NeoFS API protocol binary format into the Object -// (Protocol Buffers with direct field order). Returns an error describing -// a format violation. +// Unmarshal decodes Protocol Buffers V3 binary data into the Object. Returns an +// error describing a format violation of the specified fields. Unmarshal does +// not check presence of the required fields and, at the same time, checks +// format of presented fields. // -// See also Marshal. +// See also [Object.Marshal]. func (x *Object) Unmarshal(data []byte) error { - return x.unmarshal(data, x.readContext) + var m session.SessionToken + err := proto.Unmarshal(data, &m) + if err != nil { + return fmt.Errorf("decode protobuf: %w", err) + } + return x.readFromV2(&m, false) } // MarshalJSON encodes Object into a JSON format of the NeoFS API protocol -// (Protocol Buffers JSON). +// (Protocol Buffers V3 JSON). // -// See also UnmarshalJSON. +// See also [Object.UnmarshalJSON]. 
func (x Object) MarshalJSON() ([]byte, error) { - return x.marshalJSON(x.writeContext) + var m session.SessionToken + x.WriteToV2(&m) + return protojson.Marshal(&m) } -// UnmarshalJSON decodes NeoFS API protocol JSON format into the Object -// (Protocol Buffers JSON). Returns an error describing a format violation. +// UnmarshalJSON decodes NeoFS API protocol JSON data into the Object (Protocol +// Buffers V3 JSON). Returns an error describing a format violation. +// UnmarshalJSON does not check presence of the required fields and, at the same +// time, checks format of presented fields. // -// See also MarshalJSON. +// See also [Object.MarshalJSON]. func (x *Object) UnmarshalJSON(data []byte) error { - return x.unmarshalJSON(data, x.readContext) + var m session.SessionToken + err := protojson.Unmarshal(data, &m) + if err != nil { + return fmt.Errorf("decode protojson: %w", err) + } + return x.readFromV2(&m, false) } // Sign calculates and writes signature of the [Object] data along with issuer @@ -190,43 +223,52 @@ func (x *Object) Sign(signer user.Signer) error { // SetSignature allows to sign Object like [Object.Sign] but without issuer // setting. func (x *Object) SetSignature(signer neofscrypto.Signer) error { - return x.sign(signer, x.writeContext) + err := x.sig.Calculate(signer, x.SignedData()) + if err != nil { + return err + } + x.sigSet = true + return nil } -// SignedData returns actual payload to sign. +// SignedData returns signed data of the Object. +// +// [Object.SetIssuer] must be called before. // // See also [Object.Sign], [Object.UnmarshalSignedData]. -func (x *Object) SignedData() []byte { - return x.signedData(x.writeContext) +func (x Object) SignedData() []byte { + body := x.fillBody() + body.Context = x.fillContext() + b := make([]byte, body.MarshaledSize()) + body.MarshalStable(b) + return b } // UnmarshalSignedData is a reverse op to [Object.SignedData]. 
func (x *Object) UnmarshalSignedData(data []byte) error { - var body session.TokenBody - err := body.Unmarshal(data) + var body session.SessionToken_Body + err := proto.Unmarshal(data, &body) if err != nil { - return fmt.Errorf("decode body: %w", err) + return fmt.Errorf("decode protobuf: %w", err) } - var tok session.Token - tok.SetBody(&body) - return x.readFromV2(tok, false) + return x.readFromV2(&session.SessionToken{Body: &body}, false) } // VerifySignature checks if Object signature is presented and valid. // // Zero Object fails the check. // -// See also Sign. +// See also [Object.Sign], [Object.SetSignature]. func (x Object) VerifySignature() bool { // TODO: (#233) check owner<->key relation - return x.verifySignature(x.writeContext) + return x.sigSet && x.sig.Verify(x.SignedData()) } // BindContainer binds the Object session to a given container. Each session // MUST be bound to exactly one container. // -// See also AssertContainer. +// See also [Object.AssertContainer]. func (x *Object) BindContainer(cnr cid.ID) { x.cnr = cnr x.cnrSet = true @@ -237,9 +279,9 @@ func (x *Object) BindContainer(cnr cid.ID) { // Zero Object isn't bound to any container which is incorrect according to // NeoFS API protocol. // -// See also BindContainer. +// See also [Object.BindContainer]. func (x Object) AssertContainer(cnr cid.ID) bool { - return x.cnr.Equals(cnr) + return x.cnrSet && x.cnr == cnr } // LimitByObjects limits session scope to the given objects from the container @@ -247,8 +289,8 @@ func (x Object) AssertContainer(cnr cid.ID) bool { // // Argument MUST NOT be mutated, make a copy first. // -// See also AssertObject. -func (x *Object) LimitByObjects(objs ...oid.ID) { +// See also [Object.AssertObject]. +func (x *Object) LimitByObjects(objs []oid.ID) { x.objs = objs } @@ -256,14 +298,14 @@ func (x *Object) LimitByObjects(objs ...oid.ID) { // // Zero Object is applied to all objects in the container. // -// See also LimitByObjects. 
+// See also [Object.LimitByObjects]. func (x Object) AssertObject(obj oid.ID) bool { if len(x.objs) == 0 { return true } for i := range x.objs { - if x.objs[i].Equals(obj) { + if x.objs[i] == obj { return true } } @@ -289,7 +331,7 @@ const ( // ForVerb specifies the object operation of the session scope. Each // Object is related to the single operation. // -// See also AssertVerb. +// See also [Object.AssertVerb]. func (x *Object) ForVerb(verb ObjectVerb) { x.verb = verb } @@ -298,7 +340,7 @@ func (x *Object) ForVerb(verb ObjectVerb) { // // Zero Object relates to zero (unspecified) verb. // -// See also ForVerb. +// See also [Object.ForVerb]. func (x Object) AssertVerb(verbs ...ObjectVerb) bool { for i := range verbs { if verbs[i] == x.verb { @@ -313,7 +355,7 @@ func (x Object) AssertVerb(verbs ...ObjectVerb) bool { // // Zero Object is expired in any epoch. // -// See also SetExp. +// See also [Object.SetExp]. func (x Object) ExpiredAt(epoch uint64) bool { return x.expiredAt(epoch) } diff --git a/session/object_internal_test.go b/session/object_internal_test.go deleted file mode 100644 index 3bc0c6e41..000000000 --- a/session/object_internal_test.go +++ /dev/null @@ -1,107 +0,0 @@ -package session - -import ( - "bytes" - "crypto/rand" - "crypto/sha256" - "testing" - - "github.com/nspcc-dev/neofs-api-go/v2/session" - cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" - oid "github.com/nspcc-dev/neofs-sdk-go/object/id" - "github.com/stretchr/testify/require" -) - -func generateRandomOids(size int) []oid.ID { - checksum := [sha256.Size]byte{} - - result := make([]oid.ID, size) - for i := 0; i < size; i++ { - _, _ = rand.Read(checksum[:]) - - var id oid.ID - id.SetSHA256(checksum) - result[i] = id - } - - return result -} - -func TestObject_CopyTo(t *testing.T) { - var container Object - - containerID := cidtest.ID() - - container.ForVerb(VerbObjectDelete) - container.BindContainer(containerID) - container.LimitByObjects(generateRandomOids(2)...) 
- - t.Run("copy", func(t *testing.T) { - var dst Object - container.CopyTo(&dst) - - emptyWriter := func() session.TokenContext { - return &session.ContainerSessionContext{} - } - - require.Equal(t, container, dst) - require.True(t, bytes.Equal(container.marshal(emptyWriter), dst.marshal(emptyWriter))) - }) - - t.Run("change simple fields", func(t *testing.T) { - var dst Object - container.CopyTo(&dst) - - require.Equal(t, container.verb, dst.verb) - require.True(t, container.cnrSet) - require.True(t, dst.cnrSet) - - container.ForVerb(VerbObjectHead) - - require.NotEqual(t, container.verb, dst.verb) - require.True(t, container.cnrSet) - require.True(t, dst.cnrSet) - }) - - t.Run("change ids", func(t *testing.T) { - var dst Object - container.CopyTo(&dst) - - for i := range container.objs { - require.True(t, container.objs[i].Equals(dst.objs[i])) - - // change object id in the new object - for j := range dst.objs[i] { - dst.objs[i][j] = byte(j) - } - } - - for i := range container.objs { - require.False(t, container.objs[i].Equals(dst.objs[i])) - } - }) - - t.Run("overwrite container id", func(t *testing.T) { - var local Object - require.False(t, local.cnrSet) - - var dst Object - dst.BindContainer(containerID) - require.True(t, dst.cnrSet) - - local.CopyTo(&dst) - emptyWriter := func() session.TokenContext { - return &session.ContainerSessionContext{} - } - - require.Equal(t, local, dst) - require.True(t, bytes.Equal(local.marshal(emptyWriter), dst.marshal(emptyWriter))) - - require.False(t, local.cnrSet) - require.False(t, dst.cnrSet) - - dst.BindContainer(containerID) - require.True(t, dst.cnrSet) - require.False(t, local.cnrSet) - }) -} diff --git a/session/object_test.go b/session/object_test.go index b0e95c2f9..43f7cad89 100644 --- a/session/object_test.go +++ b/session/object_test.go @@ -1,680 +1,424 @@ package session_test import ( - "bytes" - "fmt" - "math" - "math/rand" + mrand "math/rand" "testing" - "github.com/google/uuid" - 
"github.com/nspcc-dev/neofs-api-go/v2/refs" - v2session "github.com/nspcc-dev/neofs-api-go/v2/session" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + apisession "github.com/nspcc-dev/neofs-sdk-go/api/session" cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" - neofscrypto "github.com/nspcc-dev/neofs-sdk-go/crypto" - "github.com/nspcc-dev/neofs-sdk-go/crypto/test" + oid "github.com/nspcc-dev/neofs-sdk-go/object/id" oidtest "github.com/nspcc-dev/neofs-sdk-go/object/id/test" "github.com/nspcc-dev/neofs-sdk-go/session" sessiontest "github.com/nspcc-dev/neofs-sdk-go/session/test" - usertest "github.com/nspcc-dev/neofs-sdk-go/user/test" "github.com/stretchr/testify/require" ) -func TestObjectProtocolV2(t *testing.T) { - var validV2 v2session.Token - - var body v2session.TokenBody - validV2.SetBody(&body) - - // ID - id := uuid.New() - binID, err := id.MarshalBinary() - require.NoError(t, err) - restoreID := func() { - body.SetID(binID) - } - restoreID() - - // Owner - usr := usertest.ID(t) - var usrV2 refs.OwnerID - usr.WriteToV2(&usrV2) - restoreUser := func() { - body.SetOwnerID(&usrV2) - } - restoreUser() - - // Lifetime - var lifetime v2session.TokenLifetime - lifetime.SetIat(1) - lifetime.SetNbf(2) - lifetime.SetExp(3) - restoreLifetime := func() { - body.SetLifetime(&lifetime) - } - restoreLifetime() - - // Session key - signer := test.RandomSignerRFC6979(t) - authKey := signer.Public() - binAuthKey := neofscrypto.PublicKeyBytes(authKey) - restoreAuthKey := func() { - body.SetSessionKey(binAuthKey) - } - restoreAuthKey() - - // Context - cnr := cidtest.ID() - obj1 := oidtest.ID() - obj2 := oidtest.ID() - var cnrV2 refs.ContainerID - cnr.WriteToV2(&cnrV2) - var obj1V2 refs.ObjectID - obj1.WriteToV2(&obj1V2) - var obj2V2 refs.ObjectID - obj2.WriteToV2(&obj2V2) - var cObj v2session.ObjectSessionContext - restoreCtx := func() { - cObj.SetTarget(&cnrV2, obj1V2, obj2V2) - body.SetContext(&cObj) - } - restoreCtx() - - // Signature - var sig refs.Signature - 
restoreSig := func() { - validV2.SetSignature(&sig) - } - restoreSig() - - // TODO(@cthulhu-rider): #260 use functionality for message corruption - - for _, testcase := range []struct { - name string - corrupt []func() - restore func() - assert func(session.Object) - breakSign func(*v2session.Token) - }{ - { - name: "Signature", - corrupt: []func(){ - func() { - validV2.SetSignature(nil) - }, - }, - restore: restoreSig, - }, - { - name: "ID", - corrupt: []func(){ - func() { - body.SetID([]byte{1, 2, 3}) - }, - func() { - id, err := uuid.NewDCEPerson() - require.NoError(t, err) - bindID, err := id.MarshalBinary() - require.NoError(t, err) - body.SetID(bindID) - }, - }, - restore: restoreID, - assert: func(val session.Object) { - require.Equal(t, id, val.ID()) - }, - breakSign: func(m *v2session.Token) { - id := m.GetBody().GetID() - id[len(id)-1]++ - }, - }, - { - name: "User", - corrupt: []func(){ - func() { - var brokenUsrV2 refs.OwnerID - brokenUsrV2.SetValue(append(usrV2.GetValue(), 1)) - body.SetOwnerID(&brokenUsrV2) - }, - }, - restore: restoreUser, - assert: func(val session.Object) { - require.Equal(t, usr, val.Issuer()) - }, - breakSign: func(m *v2session.Token) { - id := m.GetBody().GetOwnerID().GetValue() - copy(id, usertest.ID(t).WalletBytes()) - }, - }, - { - name: "Lifetime", - corrupt: []func(){ - func() { - body.SetLifetime(nil) - }, - }, - restore: restoreLifetime, - assert: func(val session.Object) { - require.True(t, val.InvalidAt(1)) - require.False(t, val.InvalidAt(2)) - require.False(t, val.InvalidAt(3)) - require.True(t, val.InvalidAt(4)) - }, - breakSign: func(m *v2session.Token) { - lt := m.GetBody().GetLifetime() - lt.SetIat(lt.GetIat() + 1) - }, - }, - { - name: "Auth key", - corrupt: []func(){ - func() { - body.SetSessionKey(nil) - }, - func() { - body.SetSessionKey([]byte{}) - }, - }, - restore: restoreAuthKey, - assert: func(val session.Object) { - require.True(t, val.AssertAuthKey(authKey)) - }, - breakSign: func(m *v2session.Token) { 
- body := m.GetBody() - key := body.GetSessionKey() - cp := bytes.Clone(key) - cp[len(cp)-1]++ - body.SetSessionKey(cp) - }, - }, - { - name: "Context", - corrupt: []func(){ - func() { - body.SetContext(nil) - }, - func() { - cObj.SetTarget(nil) - }, - func() { - var brokenCnr refs.ContainerID - brokenCnr.SetValue(append(cnrV2.GetValue(), 1)) - cObj.SetTarget(&brokenCnr) - }, - func() { - var brokenObj refs.ObjectID - brokenObj.SetValue(append(obj1V2.GetValue(), 1)) - cObj.SetTarget(&cnrV2, brokenObj) - }, - }, - restore: restoreCtx, - assert: func(val session.Object) { - require.True(t, val.AssertContainer(cnr)) - require.False(t, val.AssertContainer(cidtest.ID())) - require.True(t, val.AssertObject(obj1)) - require.True(t, val.AssertObject(obj2)) - require.False(t, val.AssertObject(oidtest.ID())) - }, - breakSign: func(m *v2session.Token) { - cnr := m.GetBody().GetContext().(*v2session.ObjectSessionContext).GetContainer().GetValue() - cnr[len(cnr)-1]++ - }, - }, - } { - var val session.Object - - for i, corrupt := range testcase.corrupt { - corrupt() - require.Error(t, val.ReadFromV2(validV2), testcase.name, fmt.Sprintf("corrupt #%d", i)) - - testcase.restore() - require.NoError(t, val.ReadFromV2(validV2), testcase.name, fmt.Sprintf("corrupt #%d", i)) - - if testcase.assert != nil { - testcase.assert(val) - } - - if testcase.breakSign != nil { - require.NoError(t, val.Sign(signer), testcase.name) - require.True(t, val.VerifySignature(), testcase.name) - - var signedV2 v2session.Token - val.WriteToV2(&signedV2) - - var restored session.Object - require.NoError(t, restored.ReadFromV2(signedV2), testcase.name) - require.True(t, restored.VerifySignature(), testcase.name) - - testcase.breakSign(&signedV2) - - require.NoError(t, restored.ReadFromV2(signedV2), testcase.name) - require.False(t, restored.VerifySignature(), testcase.name) - } - } - } +var knownObjectVerbs = map[session.ObjectVerb]apisession.ObjectSessionContext_Verb{ + session.VerbObjectPut: 
apisession.ObjectSessionContext_PUT, + session.VerbObjectGet: apisession.ObjectSessionContext_GET, + session.VerbObjectHead: apisession.ObjectSessionContext_HEAD, + session.VerbObjectDelete: apisession.ObjectSessionContext_DELETE, + session.VerbObjectSearch: apisession.ObjectSessionContext_SEARCH, + session.VerbObjectRange: apisession.ObjectSessionContext_RANGE, + session.VerbObjectRangeHash: apisession.ObjectSessionContext_RANGEHASH, } -func TestObject_WriteToV2(t *testing.T) { - var val session.Object - - assert := func(baseAssert func(v2session.Token)) { - var m v2session.Token - val.WriteToV2(&m) - baseAssert(m) - } - - // ID - id := uuid.New() - - binID, err := id.MarshalBinary() - require.NoError(t, err) - - val.SetID(id) - assert(func(m v2session.Token) { - require.Equal(t, binID, m.GetBody().GetID()) - }) - - // Owner/Signature - signer := test.RandomSignerRFC6979(t) - - require.NoError(t, val.Sign(signer)) - - usr := signer.UserID() - - var usrV2 refs.OwnerID - usr.WriteToV2(&usrV2) - - assert(func(m v2session.Token) { - require.Equal(t, &usrV2, m.GetBody().GetOwnerID()) - - sig := m.GetSignature() - require.NotZero(t, sig.GetKey()) - require.NotZero(t, sig.GetSign()) - }) +func setRequiredObjectAPIFields(o *session.Object) { + setRequiredTokenAPIFields(o) + o.BindContainer(cidtest.ID()) +} - // Lifetime - const iat, nbf, exp = 1, 2, 3 - val.SetIat(iat) - val.SetNbf(nbf) - val.SetExp(exp) - - assert(func(m v2session.Token) { - lt := m.GetBody().GetLifetime() - require.EqualValues(t, iat, lt.GetIat()) - require.EqualValues(t, nbf, lt.GetNbf()) - require.EqualValues(t, exp, lt.GetExp()) +func TestObjectDecoding(t *testing.T) { + testDecoding(t, sessiontest.Object, []invalidAPITestCase{ + {name: "body/context/wrong", err: "wrong context field", corrupt: func(st *apisession.SessionToken) { + st.Body.Context = new(apisession.SessionToken_Body_Container) + }}, + {name: "body/context/target/container/value/nil", err: "invalid context: invalid target container: 
missing value field", corrupt: func(st *apisession.SessionToken) { + st.Body.Context.(*apisession.SessionToken_Body_Object).Object.Target.Container.Value = nil + }}, + {name: "body/context/target/container/value/empty", err: "invalid context: invalid target container: missing value field", corrupt: func(st *apisession.SessionToken) { + st.Body.Context.(*apisession.SessionToken_Body_Object).Object.Target.Container.Value = []byte{} + }}, + {name: "body/context/target/container/value/wrong length", err: "invalid context: invalid target container: invalid value length 31", corrupt: func(st *apisession.SessionToken) { + st.Body.Context.(*apisession.SessionToken_Body_Object).Object.Target.Container.Value = make([]byte, 31) + }}, + {name: "body/context/target/objects/value/nil", err: "invalid context: invalid target object #1: missing value field", corrupt: func(st *apisession.SessionToken) { + st.Body.Context.(*apisession.SessionToken_Body_Object).Object.Target.Objects = []*refs.ObjectID{ + {Value: make([]byte, 32)}, {Value: nil}} + }}, + {name: "body/context/target/objects/value/empty", err: "invalid context: invalid target object #1: missing value field", corrupt: func(st *apisession.SessionToken) { + st.Body.Context.(*apisession.SessionToken_Body_Object).Object.Target.Objects = []*refs.ObjectID{ + {Value: make([]byte, 32)}, {Value: nil}} + }}, + {name: "body/context/target/objects/value/wrong length", err: "invalid context: invalid target object #1: invalid value length 31", corrupt: func(st *apisession.SessionToken) { + st.Body.Context.(*apisession.SessionToken_Body_Object).Object.Target.Objects = []*refs.ObjectID{ + {Value: make([]byte, 32)}, {Value: make([]byte, 31)}} + }}, }) +} - // Context - assert(func(m v2session.Token) { - cCnr, ok := m.GetBody().GetContext().(*v2session.ObjectSessionContext) - require.True(t, ok) - require.Zero(t, cCnr.GetContainer()) - require.Zero(t, cCnr.GetObjects()) +func TestObject_Unmarshal(t *testing.T) { + t.Run("invalid binary", 
func(t *testing.T) { + var c session.Object + msg := []byte("definitely_not_protobuf") + err := c.Unmarshal(msg) + require.ErrorContains(t, err, "decode protobuf") }) +} - cnr := cidtest.ID() - - var cnrV2 refs.ContainerID - cnr.WriteToV2(&cnrV2) - - obj1 := oidtest.ID() - obj2 := oidtest.ID() - - var obj1V2 refs.ObjectID - obj1.WriteToV2(&obj1V2) - var obj2V2 refs.ObjectID - obj2.WriteToV2(&obj2V2) - - val.BindContainer(cnr) - val.LimitByObjects(obj1, obj2) - - assert(func(m v2session.Token) { - cCnr, ok := m.GetBody().GetContext().(*v2session.ObjectSessionContext) - require.True(t, ok) - require.Equal(t, &cnrV2, cCnr.GetContainer()) - require.Equal(t, []refs.ObjectID{obj1V2, obj2V2}, cCnr.GetObjects()) +func TestObject_UnmarshalJSON(t *testing.T) { + t.Run("invalid json", func(t *testing.T) { + var c session.Object + msg := []byte("definitely_not_protojson") + err := c.UnmarshalJSON(msg) + require.ErrorContains(t, err, "decode protojson") }) } -func TestObject_BindContainer(t *testing.T) { - var val session.Object - var m v2session.Token - filled := sessiontest.Object() - - assertDefaults := func() { - cCnr, ok := m.GetBody().GetContext().(*v2session.ObjectSessionContext) - require.True(t, ok) - require.Zero(t, cCnr.GetContainer()) - require.Zero(t, cCnr.GetObjects()) - } - - assertBinary := func(baseAssert func()) { - val2 := filled - - require.NoError(t, val2.Unmarshal(val.Marshal())) - baseAssert() - } - - assertJSON := func(baseAssert func()) { - val2 := filled - - jd, err := val.MarshalJSON() - require.NoError(t, err) - - require.NoError(t, val2.UnmarshalJSON(jd)) - baseAssert() - } - - val.WriteToV2(&m) - - assertDefaults() - assertBinary(assertDefaults) - assertJSON(assertDefaults) - - // set value - - cnr := cidtest.ID() - - var cnrV2 refs.ContainerID - cnr.WriteToV2(&cnrV2) - - val.BindContainer(cnr) - - val.WriteToV2(&m) - - assertCnr := func() { - cObj, ok := m.GetBody().GetContext().(*v2session.ObjectSessionContext) - require.True(t, ok) - 
require.Equal(t, &cnrV2, cObj.GetContainer()) - } - - assertCnr() - assertBinary(assertCnr) - assertJSON(assertCnr) +func TestObject_CopyTo(t *testing.T) { + testCopyTo(t, sessiontest.Object()) } -func TestObject_AssertContainer(t *testing.T) { - var x session.Object - - cnr := cidtest.ID() - - require.False(t, x.AssertContainer(cnr)) - - x.BindContainer(cnr) - - require.True(t, x.AssertContainer(cnr)) +func TestObject_UnmarshalSignedData(t *testing.T) { + testSignedData(t, sessiontest.ObjectUnsigned()) } -func TestObject_LimitByObjects(t *testing.T) { - var val session.Object - var m v2session.Token - filled := sessiontest.Object() - - assertDefaults := func() { - cCnr, ok := m.GetBody().GetContext().(*v2session.ObjectSessionContext) - require.True(t, ok) - require.Zero(t, cCnr.GetContainer()) - require.Zero(t, cCnr.GetObjects()) - } - - assertBinary := func(baseAssert func()) { - val2 := filled - - require.NoError(t, val2.Unmarshal(val.Marshal())) - baseAssert() - } - - assertJSON := func(baseAssert func()) { - val2 := filled - - jd, err := val.MarshalJSON() - require.NoError(t, err) - - require.NoError(t, val2.UnmarshalJSON(jd)) - baseAssert() - } - - val.WriteToV2(&m) - - assertDefaults() - assertBinary(assertDefaults) - assertJSON(assertDefaults) - - // set value - - obj1 := oidtest.ID() - obj2 := oidtest.ID() - - var obj1V2 refs.ObjectID - obj1.WriteToV2(&obj1V2) - var obj2V2 refs.ObjectID - obj2.WriteToV2(&obj2V2) - - val.LimitByObjects(obj1, obj2) - - val.WriteToV2(&m) - - assertObj := func() { - cObj, ok := m.GetBody().GetContext().(*v2session.ObjectSessionContext) - require.True(t, ok) - require.Equal(t, []refs.ObjectID{obj1V2, obj2V2}, cObj.GetObjects()) - } - - assertObj() - assertBinary(assertObj) - assertJSON(assertObj) +func TestObject_SetAuthKey(t *testing.T) { + testAuthKey(t, setRequiredObjectAPIFields) } -func TestObject_AssertObject(t *testing.T) { - var x session.Object - - obj1 := oidtest.ID() - obj2 := oidtest.ID() - objOther := oidtest.ID() 
+func TestObject_Sign(t *testing.T) { + testSign(t, setRequiredObjectAPIFields) +} - require.True(t, x.AssertObject(obj1)) - require.True(t, x.AssertObject(obj2)) - require.True(t, x.AssertObject(objOther)) +func TestObject_SetSignature(t *testing.T) { + testSetSignature(t, setRequiredObjectAPIFields) +} - x.LimitByObjects(obj1, obj2) +func TestObject_SetIssuer(t *testing.T) { + testIssuer(t, setRequiredObjectAPIFields) +} - require.True(t, x.AssertObject(obj1)) - require.True(t, x.AssertObject(obj2)) - require.False(t, x.AssertObject(objOther)) +func TestObject_SetID(t *testing.T) { + testID(t, setRequiredObjectAPIFields) } func TestObject_InvalidAt(t *testing.T) { - var x session.Object - - nbf := rand.Uint64() - if nbf == math.MaxUint64 { - nbf-- - } - - iat := nbf - exp := iat + 1 - - x.SetNbf(nbf) - x.SetIat(iat) - x.SetExp(exp) - - require.True(t, x.InvalidAt(nbf-1)) - require.True(t, x.InvalidAt(iat-1)) - require.False(t, x.InvalidAt(iat)) - require.False(t, x.InvalidAt(exp)) - require.True(t, x.InvalidAt(exp+1)) + testInvalidAt(t, session.Object{}) } -func TestObject_ID(t *testing.T) { - var x session.Object - - require.Zero(t, x.ID()) - - id := uuid.New() - - x.SetID(id) - - require.Equal(t, id, x.ID()) +func TestObject_SetExp(t *testing.T) { + testLifetimeField(t, session.Object.Exp, (*session.Object).SetExp, (*apisession.SessionToken_Body_TokenLifetime).GetExp, setRequiredObjectAPIFields) } -func TestObject_AssertAuthKey(t *testing.T) { - var x session.Object - - key := test.RandomSignerRFC6979(t).Public() - - require.False(t, x.AssertAuthKey(key)) - - x.SetAuthKey(key) - - require.True(t, x.AssertAuthKey(key)) +func TestObject_SetNbf(t *testing.T) { + testLifetimeField(t, session.Object.Nbf, (*session.Object).SetNbf, (*apisession.SessionToken_Body_TokenLifetime).GetNbf, setRequiredObjectAPIFields) } -func TestObject_ForVerb(t *testing.T) { - var val session.Object - var m v2session.Token - filled := sessiontest.Object() - - assertDefaults := func() { - 
cCnr, ok := m.GetBody().GetContext().(*v2session.ObjectSessionContext) - require.True(t, ok) - require.Zero(t, cCnr.GetVerb()) - } - - assertBinary := func(baseAssert func()) { - val2 := filled - - require.NoError(t, val2.Unmarshal(val.Marshal())) - baseAssert() - } - - assertJSON := func(baseAssert func()) { - val2 := filled - - jd, err := val.MarshalJSON() - require.NoError(t, err) - - require.NoError(t, val2.UnmarshalJSON(jd)) - baseAssert() - } - - val.WriteToV2(&m) - - assertDefaults() - assertBinary(assertDefaults) - assertJSON(assertDefaults) - - // set value - - assertVerb := func(verb v2session.ObjectSessionVerb) { - cCnr, ok := m.GetBody().GetContext().(*v2session.ObjectSessionContext) - require.True(t, ok) - require.Equal(t, verb, cCnr.GetVerb()) - } - - for from, to := range map[session.ObjectVerb]v2session.ObjectSessionVerb{ - session.VerbObjectPut: v2session.ObjectVerbPut, - session.VerbObjectGet: v2session.ObjectVerbGet, - session.VerbObjectHead: v2session.ObjectVerbHead, - session.VerbObjectSearch: v2session.ObjectVerbSearch, - session.VerbObjectRangeHash: v2session.ObjectVerbRangeHash, - session.VerbObjectRange: v2session.ObjectVerbRange, - session.VerbObjectDelete: v2session.ObjectVerbDelete, - } { - val.ForVerb(from) - - val.WriteToV2(&m) - - assertVerb(to) - assertBinary(func() { assertVerb(to) }) - assertJSON(func() { assertVerb(to) }) - } +func TestObject_SetIat(t *testing.T) { + testLifetimeField(t, session.Object.Iat, (*session.Object).SetIat, (*apisession.SessionToken_Body_TokenLifetime).GetIat, setRequiredObjectAPIFields) } -func TestObject_AssertVerb(t *testing.T) { - var x session.Object - - const v1, v2 = session.VerbObjectGet, session.VerbObjectPut - - require.False(t, x.AssertVerb(v1, v2)) - - x.ForVerb(v1) - require.True(t, x.AssertVerb(v1)) - require.False(t, x.AssertVerb(v2)) - require.True(t, x.AssertVerb(v1, v2)) - require.True(t, x.AssertVerb(v2, v1)) +func TestObject_ExpiredAt(t *testing.T) { + var o session.Object + 
require.True(t, o.ExpiredAt(0)) + require.True(t, o.ExpiredAt(1)) + + o.SetExp(1) + require.False(t, o.ExpiredAt(0)) + require.False(t, o.ExpiredAt(1)) + require.True(t, o.ExpiredAt(2)) + require.True(t, o.ExpiredAt(3)) + + o.SetExp(2) + require.False(t, o.ExpiredAt(0)) + require.False(t, o.ExpiredAt(1)) + require.False(t, o.ExpiredAt(2)) + require.True(t, o.ExpiredAt(3)) } -func TestObject_Issuer(t *testing.T) { - var token session.Object - signer := test.RandomSignerRFC6979(t) - - require.Zero(t, token.Issuer()) - require.Nil(t, token.IssuerPublicKeyBytes()) - - require.NoError(t, token.Sign(signer)) +func TestObject_ForVerb(t *testing.T) { + var c session.Object - issuer := signer.UserID() + for verb := range knownObjectVerbs { + require.False(t, c.AssertVerb(verb)) + c.ForVerb(verb) + require.True(t, c.AssertVerb(verb)) + } - require.True(t, token.Issuer().Equals(issuer)) - require.Equal(t, neofscrypto.PublicKeyBytes(signer.Public()), token.IssuerPublicKeyBytes()) + verb := session.ObjectVerb(mrand.Uint32() % 256) + verbOther := verb + 1 + c.ForVerb(verb) + require.True(t, c.AssertVerb(verb)) + require.False(t, c.AssertVerb(verbOther)) + + c.ForVerb(verbOther) + require.False(t, c.AssertVerb(verb)) + require.True(t, c.AssertVerb(verbOther)) + + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst session.Object + + dst.ForVerb(verb) + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.False(t, dst.AssertVerb(verb)) + require.False(t, dst.AssertVerb(verbOther)) + + dst.ForVerb(verbOther) + src.ForVerb(verb) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.True(t, dst.AssertVerb(verb)) + require.False(t, dst.AssertVerb(verbOther)) + }) + t.Run("api", func(t *testing.T) { + var src, dst session.Object + var msg apisession.SessionToken + + // set required data just to satisfy decoder + setRequiredObjectAPIFields(&src) + + dst.ForVerb(verb) + src.WriteToV2(&msg) + require.Zero(t, 
msg.Body.Context.(*apisession.SessionToken_Body_Object).Object.Verb) + require.NoError(t, dst.ReadFromV2(&msg)) + require.False(t, dst.AssertVerb(verb)) + require.False(t, dst.AssertVerb(verbOther)) + + dst.ForVerb(verbOther) + src.ForVerb(verb) + src.WriteToV2(&msg) + require.EqualValues(t, verb, msg.Body.Context.(*apisession.SessionToken_Body_Object).Object.Verb) + require.NoError(t, dst.ReadFromV2(&msg)) + require.True(t, dst.AssertVerb(verb)) + require.False(t, dst.AssertVerb(verbOther)) + + for verb, apiVerb := range knownObjectVerbs { + src.ForVerb(verb) + src.WriteToV2(&msg) + require.EqualValues(t, apiVerb, msg.Body.Context.(*apisession.SessionToken_Body_Object).Object.Verb) + require.NoError(t, dst.ReadFromV2(&msg)) + require.True(t, dst.AssertVerb(verb)) + } + }) + t.Run("json", func(t *testing.T) { + var src, dst session.Object + + dst.ForVerb(verb) + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.False(t, dst.AssertVerb(verb)) + require.False(t, dst.AssertVerb(verbOther)) + + dst.ForVerb(verbOther) + src.ForVerb(verb) + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.True(t, dst.AssertVerb(verb)) + require.False(t, dst.AssertVerb(verbOther)) + }) + }) } -func TestObject_Sign(t *testing.T) { - val := sessiontest.Object() - - require.NoError(t, val.SetSignature(test.RandomSignerRFC6979(t))) - require.Zero(t, val.Issuer()) - require.True(t, val.VerifySignature()) - - require.NoError(t, val.Sign(test.RandomSignerRFC6979(t))) - - require.True(t, val.VerifySignature()) - - t.Run("issue#546", func(t *testing.T) { - signer1 := test.RandomSignerRFC6979(t) - signer2 := test.RandomSignerRFC6979(t) - require.False(t, signer1.UserID().Equals(signer2.UserID())) - - token1 := sessiontest.Object() - require.NoError(t, token1.Sign(signer1)) - require.Equal(t, signer1.UserID(), token1.Issuer()) +func TestObject_BindContainer(t 
*testing.T) { + var c session.Object - // copy token and re-sign - var token2 session.Object - token1.CopyTo(&token2) - require.NoError(t, token2.Sign(signer2)) - require.Equal(t, signer2.UserID(), token2.Issuer()) + cnr := cidtest.ID() + cnrOther := cidtest.ChangeID(cnr) + require.False(t, c.AssertContainer(cnr)) + require.False(t, c.AssertContainer(cnrOther)) + + c.BindContainer(cnr) + require.True(t, c.AssertContainer(cnr)) + require.False(t, c.AssertContainer(cnrOther)) + + c.BindContainer(cnrOther) + require.False(t, c.AssertContainer(cnr)) + require.True(t, c.AssertContainer(cnrOther)) + + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst session.Object + + dst.BindContainer(cnr) + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.False(t, dst.AssertContainer(cnr)) + require.False(t, dst.AssertContainer(cnrOther)) + + src.BindContainer(cnr) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.True(t, dst.AssertContainer(cnr)) + require.False(t, dst.AssertContainer(cnrOther)) + + src.BindContainer(cnrOther) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + require.False(t, dst.AssertContainer(cnr)) + require.True(t, dst.AssertContainer(cnrOther)) + }) + t.Run("api", func(t *testing.T) { + var src, dst session.Object + var msg apisession.SessionToken + + // set required data just to satisfy decoder + setRequiredTokenAPIFields(&src) + + src.BindContainer(cnr) + src.WriteToV2(&msg) + require.Equal(t, &refs.ContainerID{Value: cnr[:]}, msg.Body.Context.(*apisession.SessionToken_Body_Object).Object.Target.Container) + require.NoError(t, dst.ReadFromV2(&msg)) + require.True(t, dst.AssertContainer(cnr)) + require.False(t, dst.AssertContainer(cnrOther)) + + src.BindContainer(cnrOther) + src.WriteToV2(&msg) + require.Equal(t, &refs.ContainerID{Value: cnrOther[:]}, msg.Body.Context.(*apisession.SessionToken_Body_Object).Object.Target.Container) + require.NoError(t, 
dst.ReadFromV2(&msg)) + require.False(t, dst.AssertContainer(cnr)) + require.True(t, dst.AssertContainer(cnrOther)) + }) + t.Run("json", func(t *testing.T) { + var src, dst session.Object + + dst.BindContainer(cnr) + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.False(t, dst.AssertContainer(cnr)) + require.False(t, dst.AssertContainer(cnrOther)) + + src.BindContainer(cnr) + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.True(t, dst.AssertContainer(cnr)) + require.False(t, dst.AssertContainer(cnrOther)) + + src.BindContainer(cnrOther) + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + require.False(t, dst.AssertContainer(cnr)) + require.True(t, dst.AssertContainer(cnrOther)) + }) }) } -func TestObject_SignedData(t *testing.T) { - issuerSigner := test.RandomSignerRFC6979(t) - issuer := issuerSigner.UserID() - - var tokenSession session.Object - tokenSession.SetID(uuid.New()) - tokenSession.SetExp(100500) - tokenSession.BindContainer(cidtest.ID()) - tokenSession.ForVerb(session.VerbObjectPut) - tokenSession.SetAuthKey(test.RandomSignerRFC6979(t).Public()) - tokenSession.SetIssuer(issuer) - - signedData := tokenSession.SignedData() - var dec session.Object - require.NoError(t, dec.UnmarshalSignedData(signedData)) - require.Equal(t, tokenSession, dec) - - sign, err := issuerSigner.Sign(signedData) - require.NoError(t, err) - - require.NoError(t, tokenSession.Sign(issuerSigner)) - require.True(t, tokenSession.VerifySignature()) - - var m v2session.Token - tokenSession.WriteToV2(&m) - - require.Equal(t, m.GetSignature().GetSign(), sign) +func TestObject_LimitByObjects(t *testing.T) { + var c session.Object + checkMatch := func(c session.Object, objs []oid.ID) { + for i := range objs { + require.True(t, c.AssertObject(objs[i])) + } + } + checkMismatch := func(c 
session.Object, objs []oid.ID) { + for i := range objs { + require.False(t, c.AssertObject(objs[i])) + } + } - test.SignedDataComponentUser(t, issuerSigner, &tokenSession) + objs := oidtest.NIDs(4) + checkMatch(c, objs) + + c.LimitByObjects(objs[:2]) + checkMatch(c, objs[:2]) + checkMismatch(c, objs[2:]) + + c.LimitByObjects(objs[2:]) + checkMismatch(c, objs[:2]) + checkMatch(c, objs[2:]) + + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst session.Object + + dst.LimitByObjects(oidtest.NIDs(3)) + err := dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + checkMatch(dst, objs) + + src.LimitByObjects(objs[:2]) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + checkMatch(dst, objs[:2]) + checkMismatch(dst, objs[2:]) + + src.LimitByObjects(objs[2:]) + err = dst.Unmarshal(src.Marshal()) + require.NoError(t, err) + checkMismatch(dst, objs[:2]) + checkMatch(dst, objs[2:]) + }) + t.Run("api", func(t *testing.T) { + var src, dst session.Object + var msg apisession.SessionToken + + // set required data just to satisfy decoder + setRequiredObjectAPIFields(&src) + + dst.LimitByObjects(oidtest.NIDs(3)) + src.WriteToV2(&msg) + require.Zero(t, msg.Body.Context.(*apisession.SessionToken_Body_Object).Object.Target.Objects) + require.NoError(t, dst.ReadFromV2(&msg)) + checkMatch(dst, objs) + + src.LimitByObjects(objs[:2]) + src.WriteToV2(&msg) + require.ElementsMatch(t, []*refs.ObjectID{ + {Value: objs[0][:]}, + {Value: objs[1][:]}, + }, msg.Body.Context.(*apisession.SessionToken_Body_Object).Object.Target.Objects) + require.NoError(t, dst.ReadFromV2(&msg)) + checkMatch(dst, objs[:2]) + checkMismatch(dst, objs[2:]) + + src.LimitByObjects(objs[2:]) + src.WriteToV2(&msg) + require.ElementsMatch(t, []*refs.ObjectID{ + {Value: objs[2][:]}, + {Value: objs[3][:]}, + }, msg.Body.Context.(*apisession.SessionToken_Body_Object).Object.Target.Objects) + require.NoError(t, dst.ReadFromV2(&msg)) + checkMismatch(dst, objs[:2]) + 
checkMatch(dst, objs[2:]) + }) + t.Run("json", func(t *testing.T) { + var src, dst session.Object + + dst.LimitByObjects(oidtest.NIDs(3)) + j, err := src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + checkMatch(dst, objs) + + src.LimitByObjects(objs[:2]) + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + checkMatch(dst, objs[:2]) + checkMismatch(dst, objs[2:]) + + src.LimitByObjects(objs[2:]) + j, err = src.MarshalJSON() + require.NoError(t, err) + err = dst.UnmarshalJSON(j) + require.NoError(t, err) + checkMismatch(dst, objs[:2]) + checkMatch(dst, objs[2:]) + }) + }) } diff --git a/session/test/session.go b/session/test/session.go index 88d1e53e2..a026e89aa 100644 --- a/session/test/session.go +++ b/session/test/session.go @@ -4,6 +4,8 @@ import ( "crypto/ecdsa" "crypto/elliptic" "crypto/rand" + "fmt" + mrand "math/rand" "github.com/google/uuid" cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" @@ -11,12 +13,10 @@ import ( oidtest "github.com/nspcc-dev/neofs-sdk-go/object/id/test" "github.com/nspcc-dev/neofs-sdk-go/session" "github.com/nspcc-dev/neofs-sdk-go/user" + usertest "github.com/nspcc-dev/neofs-sdk-go/user/test" ) -// Container returns random session.Container. -// -// Resulting token is unsigned. 
-func Container() session.Container { +func container(sign bool) session.Container { var tok session.Container priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) @@ -24,35 +24,39 @@ func Container() session.Container { panic(err) } - tok.ForVerb(session.VerbContainerPut) + tok.ForVerb(session.ContainerVerb(mrand.Uint32() % 20)) tok.ApplyOnlyTo(cidtest.ID()) tok.SetID(uuid.New()) tok.SetAuthKey((*neofsecdsa.PublicKey)(&priv.PublicKey)) - tok.SetExp(11) - tok.SetNbf(22) - tok.SetIat(33) + tok.SetExp(mrand.Uint64()) + tok.SetNbf(mrand.Uint64()) + tok.SetIat(mrand.Uint64()) + tok.SetIssuer(usertest.ID()) + + if sign { + if err = tok.Sign(user.NewAutoIDSigner(*priv)); err != nil { + panic(fmt.Errorf("unexpected sign error: %w", err)) + } + } + + if err = tok.Unmarshal(tok.Marshal()); err != nil { // to fill utility fields + panic(fmt.Errorf("unexpected container session encode-decode failure: %w", err)) + } return tok } -// ContainerSigned returns signed random session.Container. -// -// Panics if token could not be signed (actually unexpected). -func ContainerSigned(signer user.Signer) session.Container { - tok := Container() - - err := tok.Sign(signer) - if err != nil { - panic(err) - } +// Container returns random session.Container. +func Container() session.Container { + return container(true) +} - return tok +// ContainerUnsigned returns random unsigned session.Container. +func ContainerUnsigned() session.Container { + return container(false) } -// Object returns random session.Object. -// -// Resulting token is unsigned. 
-func Object() session.Object { +func object(sign bool) session.Object { var tok session.Object priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) @@ -60,28 +64,31 @@ func Object() session.Object { panic(err) } - tok.ForVerb(session.VerbObjectPut) + tok.ForVerb(session.ObjectVerb(mrand.Uint32() % 20)) tok.BindContainer(cidtest.ID()) - tok.LimitByObjects(oidtest.ID(), oidtest.ID()) + tok.LimitByObjects(oidtest.NIDs(1 + mrand.Int()%3)) tok.SetID(uuid.New()) tok.SetAuthKey((*neofsecdsa.PublicKey)(&priv.PublicKey)) - tok.SetExp(11) - tok.SetNbf(22) - tok.SetIat(33) - - return tok -} - -// ObjectSigned returns signed random session.Object. -// -// Panics if token could not be signed (actually unexpected). -func ObjectSigned(signer user.Signer) session.Object { - tok := Object() + tok.SetExp(mrand.Uint64()) + tok.SetNbf(mrand.Uint64()) + tok.SetIat(mrand.Uint64()) + tok.SetIssuer(usertest.ID()) + + if sign { + if err = tok.Sign(user.NewAutoIDSigner(*priv)); err != nil { + panic(fmt.Errorf("unexpected sign error: %w", err)) + } + } - err := tok.Sign(signer) - if err != nil { - panic(err) + if err = tok.Unmarshal(tok.Marshal()); err != nil { // to fill utility fields + panic(fmt.Errorf("unexpected object session encode-decode failure: %w", err)) } return tok } + +// Object returns random session.Object. +func Object() session.Object { return object(true) } + +// ObjectUnsigned returns random unsigned session.Object. 
+func ObjectUnsigned() session.Object { return object(false) } diff --git a/session/test/session_test.go b/session/test/session_test.go new file mode 100644 index 000000000..dbeacbf21 --- /dev/null +++ b/session/test/session_test.go @@ -0,0 +1,86 @@ +package sessiontest_test + +import ( + "testing" + + apisession "github.com/nspcc-dev/neofs-sdk-go/api/session" + "github.com/nspcc-dev/neofs-sdk-go/session" + sessiontest "github.com/nspcc-dev/neofs-sdk-go/session/test" + "github.com/stretchr/testify/require" +) + +func TestContainer(t *testing.T) { + v := sessiontest.Container() + require.NotEqual(t, v, sessiontest.Container()) + require.True(t, v.VerifySignature()) + + var v2 session.Container + require.NoError(t, v2.Unmarshal(v.Marshal())) + require.Equal(t, v, v2) + + var m apisession.SessionToken + v.WriteToV2(&m) + var v3 session.Container + require.NoError(t, v3.ReadFromV2(&m)) + require.Equal(t, v, v3) + + j, err := v.MarshalJSON() + require.NoError(t, err) + var v4 session.Container + require.NoError(t, v4.UnmarshalJSON(j)) + require.Equal(t, v, v4) +} + +func TestContainerUnsigned(t *testing.T) { + v := sessiontest.ContainerUnsigned() + require.NotEqual(t, v, sessiontest.ContainerUnsigned()) + require.False(t, v.VerifySignature()) + + var v2 session.Container + require.NoError(t, v2.Unmarshal(v.Marshal())) + require.Equal(t, v, v2) + + j, err := v.MarshalJSON() + require.NoError(t, err) + var v3 session.Container + require.NoError(t, v3.UnmarshalJSON(j)) + require.Equal(t, v, v3) +} + +func TestObject(t *testing.T) { + v := sessiontest.Object() + require.NotEqual(t, v, sessiontest.Object()) + require.True(t, v.VerifySignature()) + + var v2 session.Object + require.NoError(t, v2.Unmarshal(v.Marshal())) + require.Equal(t, v, v2) + + var m apisession.SessionToken + v.WriteToV2(&m) + var v3 session.Object + require.NoError(t, v3.ReadFromV2(&m)) + require.Equal(t, v, v3) + + j, err := v.MarshalJSON() + require.NoError(t, err) + var v4 session.Object + 
require.NoError(t, v4.UnmarshalJSON(j)) + require.Equal(t, v, v4) +} + +func TestObjectUnsigned(t *testing.T) { + v := sessiontest.ObjectUnsigned() + require.NotEqual(t, v, sessiontest.ObjectUnsigned()) + require.False(t, v.VerifySignature()) + + var v2 session.Object + require.NoError(t, v2.Unmarshal(v.Marshal())) + require.Equal(t, v, v2) + + j, err := v.MarshalJSON() + require.NoError(t, err) + var v3 session.Object + require.NoError(t, v3.UnmarshalJSON(j)) + require.Equal(t, v, v3) +} diff --git a/storagegroup/example_test.go b/storagegroup/example_test.go deleted file mode 100644 index e6cdc71f6..000000000 --- a/storagegroup/example_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package storagegroup_test - -import ( - apiGoStoragegroup "github.com/nspcc-dev/neofs-api-go/v2/storagegroup" - "github.com/nspcc-dev/neofs-sdk-go/storagegroup" -) - -// StorageGroup type groups verification values for Data Audit sessions. -func ExampleStorageGroup_validation() { - // receive sg info - - var sg storagegroup.StorageGroup - - sg.ExpirationEpoch() // expiration of the storage group - sg.Members() // objects in the group - sg.ValidationDataHash() // hash for objects validation - sg.ValidationDataSize() // total objects' payload size -} - -// Instances can be also used to process NeoFS API V2 protocol messages with [https://github.com/nspcc-dev/neofs-api] package. -func ExampleStorageGroup_marshalling() { - // import apiGoStoragegroup "github.com/nspcc-dev/neofs-api-go/v2/storagegroup" - - // On the client side. - - var sg storagegroup.StorageGroup - var msg apiGoStoragegroup.StorageGroup - sg.WriteToV2(&msg) - // *send message* - - // On the server side. 
- - _ = sg.ReadFromV2(msg) -} diff --git a/storagegroup/storagegroup.go b/storagegroup/storagegroup.go index d9d89dea6..a03e0a9c9 100644 --- a/storagegroup/storagegroup.go +++ b/storagegroup/storagegroup.go @@ -1,87 +1,29 @@ package storagegroup import ( - "errors" "fmt" - "strconv" - objectV2 "github.com/nspcc-dev/neofs-api-go/v2/object" - "github.com/nspcc-dev/neofs-api-go/v2/refs" - "github.com/nspcc-dev/neofs-api-go/v2/storagegroup" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/storagegroup" "github.com/nspcc-dev/neofs-sdk-go/checksum" - objectSDK "github.com/nspcc-dev/neofs-sdk-go/object" oid "github.com/nspcc-dev/neofs-sdk-go/object/id" + "google.golang.org/protobuf/proto" ) -// StorageGroup represents storage group of the NeoFS objects. +// StorageGroup represents storage group of the NeoFS objects. StorageGroup is +// stored and transmitted as payload of system NeoFS objects. // -// StorageGroup is mutually compatible with github.com/nspcc-dev/neofs-api-go/v2/storagegroup.StorageGroup -// message. See ReadFromMessageV2 / WriteToMessageV2 methods. +// StorageGroup is mutually compatible with [storagegroup.StorageGroup] message. +// See [StorageGroup.ReadFromV2] / [StorageGroup.WriteToV2] methods. // // Instances can be created using built-in var declaration. -// -// Note that direct typecast is not safe and may result in loss of compatibility: -// -// _ = StorageGroup(storagegroup.StorageGroup) // not recommended -type StorageGroup storagegroup.StorageGroup - -// reads StorageGroup from the storagegroup.StorageGroup message. If checkFieldPresence is set, -// returns an error on absence of any protocol-required field. 
-func (sg *StorageGroup) readFromV2(m storagegroup.StorageGroup, checkFieldPresence bool) error { - var err error - - h := m.GetValidationHash() - if h != nil { - err = new(checksum.Checksum).ReadFromV2(*h) - if err != nil { - return fmt.Errorf("invalid hash: %w", err) - } - } else if checkFieldPresence { - return errors.New("missing hash") - } - - members := m.GetMembers() - if len(members) > 0 { - var member oid.ID - mMembers := make(map[oid.ID]struct{}, len(members)) - var exits bool - - for i := range members { - err = member.ReadFromV2(members[i]) - if err != nil { - return fmt.Errorf("invalid member: %w", err) - } - - _, exits = mMembers[member] - if exits { - return fmt.Errorf("duplicated member %s", member) - } - - mMembers[member] = struct{}{} - } - } else if checkFieldPresence { - return errors.New("missing members") - } - - *sg = StorageGroup(m) - - return nil -} - -// ReadFromV2 reads StorageGroup from the storagegroup.StorageGroup message. -// Checks if the message conforms to NeoFS API V2 protocol. -// -// See also WriteToV2. -func (sg *StorageGroup) ReadFromV2(m storagegroup.StorageGroup) error { - return sg.readFromV2(m, true) -} +type StorageGroup struct { + sz uint64 + exp uint64 + ids []oid.ID -// WriteToV2 writes StorageGroup to the storagegroup.StorageGroup message. -// The message must not be nil. -// -// See also ReadFromV2. -func (sg StorageGroup) WriteToV2(m *storagegroup.StorageGroup) { - *m = (storagegroup.StorageGroup)(sg) + csSet bool + cs checksum.Checksum } // ValidationDataSize returns total size of the payloads @@ -89,18 +31,17 @@ func (sg StorageGroup) WriteToV2(m *storagegroup.StorageGroup) { // // Zero StorageGroup has 0 data size. // -// See also SetValidationDataSize. +// See also [StorageGroup.SetValidationDataSize]. 
func (sg StorageGroup) ValidationDataSize() uint64 { - v2 := (storagegroup.StorageGroup)(sg) - return v2.GetValidationDataSize() + return sg.sz } // SetValidationDataSize sets total size of the payloads // of objects in the storage group. // -// See also ValidationDataSize. -func (sg *StorageGroup) SetValidationDataSize(epoch uint64) { - (*storagegroup.StorageGroup)(sg).SetValidationDataSize(epoch) +// See also [StorageGroup.ValidationDataSize]. +func (sg *StorageGroup) SetValidationDataSize(sz uint64) { + sg.sz = sz } // ValidationDataHash returns homomorphic hash from the @@ -108,224 +49,193 @@ func (sg *StorageGroup) SetValidationDataSize(epoch uint64) { // and bool that indicates checksum presence in the storage // group. // -// Zero StorageGroup does not have validation data checksum. +// Zero StorageGroup does not have validation data checksum (zero type). // -// See also SetValidationDataHash. -func (sg StorageGroup) ValidationDataHash() (v checksum.Checksum, isSet bool) { - v2 := (storagegroup.StorageGroup)(sg) - if checksumV2 := v2.GetValidationHash(); checksumV2 != nil { - err := v.ReadFromV2(*checksumV2) - isSet = (err == nil) +// See also [StorageGroup.SetValidationDataHash]. +func (sg StorageGroup) ValidationDataHash() checksum.Checksum { + if sg.csSet { + return sg.cs } - - return + return checksum.Checksum{} } // SetValidationDataHash sets homomorphic hash from the // concatenation of the payloads of the storage group members. // -// See also ValidationDataHash. +// See also [StorageGroup.ValidationDataHash]. func (sg *StorageGroup) SetValidationDataHash(hash checksum.Checksum) { - var v2 refs.Checksum - hash.WriteToV2(&v2) - - (*storagegroup.StorageGroup)(sg).SetValidationHash(&v2) -} - -// ExpirationEpoch returns last NeoFS epoch number -// of the storage group lifetime. -// -// Zero StorageGroup has 0 expiration epoch. -// -// See also SetExpirationEpoch. 
-func (sg StorageGroup) ExpirationEpoch() uint64 { - v2 := (storagegroup.StorageGroup)(sg) - // nolint:staticcheck - return v2.GetExpirationEpoch() -} - -// SetExpirationEpoch sets last NeoFS epoch number -// of the storage group lifetime. -// -// See also ExpirationEpoch. -func (sg *StorageGroup) SetExpirationEpoch(epoch uint64) { - // nolint:staticcheck - (*storagegroup.StorageGroup)(sg).SetExpirationEpoch(epoch) + sg.cs, sg.csSet = hash, true } -// Members returns strictly ordered list of -// storage group member objects. +// Members returns strictly ordered list of storage group member objects. // -// Zero StorageGroup has nil members value. +// Zero StorageGroup has no members. // -// See also SetMembers. +// See also [StorageGroup.SetMembers]. func (sg StorageGroup) Members() []oid.ID { - v2 := (storagegroup.StorageGroup)(sg) - mV2 := v2.GetMembers() - - if mV2 == nil { - return nil - } - - m := make([]oid.ID, len(mV2)) - - for i := range mV2 { - _ = m[i].ReadFromV2(mV2[i]) - } - - return m + return sg.ids } // SetMembers sets strictly ordered list of // storage group member objects. // -// See also Members. +// See also [StorageGroup.Members]. func (sg *StorageGroup) SetMembers(members []oid.ID) { - mV2 := (*storagegroup.StorageGroup)(sg).GetMembers() - - if members == nil { - mV2 = nil - } else { - ln := len(members) - - if cap(mV2) >= ln { - mV2 = mV2[:0] - } else { - mV2 = make([]refs.ObjectID, 0, ln) - } - - var oidV2 refs.ObjectID - - for i := 0; i < ln; i++ { - members[i].WriteToV2(&oidV2) - mV2 = append(mV2, oidV2) - } - } - - (*storagegroup.StorageGroup)(sg).SetMembers(mV2) + sg.ids = members } -// Marshal marshals StorageGroup into a protobuf binary form. +// Marshal encodes StorageGroup into a Protocol Buffers V3 binary format. // -// See also Unmarshal. -func (sg StorageGroup) Marshal() ([]byte, error) { - return (*storagegroup.StorageGroup)(&sg).StableMarshal(nil), nil -} - -// Unmarshal unmarshals protobuf binary representation of StorageGroup. 
-// -// See also Marshal. -func (sg *StorageGroup) Unmarshal(data []byte) error { - v2 := (*storagegroup.StorageGroup)(sg) - err := v2.Unmarshal(data) - if err != nil { - return err +// See also [StorageGroup.Unmarshal]. +func (sg StorageGroup) Marshal() []byte { + m := storagegroup.StorageGroup{ + ValidationDataSize: sg.sz, + ExpirationEpoch: sg.exp, } - return sg.readFromV2(*v2, false) -} + if sg.csSet { + m.ValidationHash = new(refs.Checksum) + sg.cs.WriteToV2(m.ValidationHash) + } -// MarshalJSON encodes StorageGroup to protobuf JSON format. -// -// See also UnmarshalJSON. -func (sg StorageGroup) MarshalJSON() ([]byte, error) { - v2 := (storagegroup.StorageGroup)(sg) - return v2.MarshalJSON() -} + if sg.ids != nil { + m.Members = make([]*refs.ObjectID, len(sg.ids)) + for i := range sg.ids { + m.Members[i] = new(refs.ObjectID) + sg.ids[i].WriteToV2(m.Members[i]) + } + } -// UnmarshalJSON decodes StorageGroup from protobuf JSON format. -// -// See also MarshalJSON. -func (sg *StorageGroup) UnmarshalJSON(data []byte) error { - v2 := (*storagegroup.StorageGroup)(sg) - err := v2.UnmarshalJSON(data) + b, err := proto.Marshal(&m) if err != nil { - return err + // while it is bad to panic on external package return, we can do nothing better + // for this case: how can a normal message not be encoded? + panic(fmt.Errorf("unexpected marshal protobuf message failure: %w", err)) } - return sg.readFromV2(*v2, false) + return b } -// ReadFromObject assemble StorageGroup from a regular -// Object structure. Object must contain unambiguous information -// about its expiration epoch, otherwise behaviour is undefined. +// Unmarshal decodes Protocol Buffers V3 binary data into the StorageGroup. +// Returns an error describing a format violation of the specified fields. +// Unmarshal does not check presence of the required fields and, at the same +// time, checks format of presented fields. 
// -// Returns any error appeared during storage group parsing; returns -// error if object is not of TypeStorageGroup type. -func ReadFromObject(sg *StorageGroup, o objectSDK.Object) error { - if typ := o.Type(); typ != objectSDK.TypeStorageGroup { - return fmt.Errorf("object is not of StorageGroup type: %v", typ) - } - - err := sg.Unmarshal(o.Payload()) +// See also [StorageGroup.Marshal]. +func (sg *StorageGroup) Unmarshal(data []byte) error { + var m storagegroup.StorageGroup + err := proto.Unmarshal(data, &m) if err != nil { - return fmt.Errorf("could not unmarshal object: %w", err) + return fmt.Errorf("decode protobuf: %w", err) } - var expObj uint64 + if sg.csSet = m.ValidationHash != nil; sg.csSet { + err = sg.cs.ReadFromV2(m.ValidationHash) + if err != nil { + return fmt.Errorf("invalid hash: %w", err) + } + } - for _, attr := range o.Attributes() { - if attr.Key() == objectV2.SysAttributeExpEpoch { - expObj, err = strconv.ParseUint(attr.Value(), 10, 64) + if len(m.Members) > 0 { + sg.ids = make([]oid.ID, len(m.Members)) + for i := range m.Members { + if m.Members[i] == nil { + return fmt.Errorf("member #%d is nil", m.Members[i]) + } + err = sg.ids[i].ReadFromV2(m.Members[i]) if err != nil { - return fmt.Errorf("could not get expiration from object: %w", err) + return fmt.Errorf("invalid member #%d: %w", i, err) } - break + for j := 0; j < i; j++ { + if sg.ids[i] == sg.ids[j] { + return fmt.Errorf("duplicated member %s", sg.ids[i]) + } + } } + } else { + sg.ids = nil } - // Supporting deprecated functionality. - // See https://github.com/nspcc-dev/neofs-api/pull/205. - if expSG := sg.ExpirationEpoch(); expObj != expSG { - return fmt.Errorf( - "expiration does not match: from object: %d, from payload: %d", - expObj, expSG) - } + sg.sz = m.ValidationDataSize + sg.exp = m.ExpirationEpoch return nil } -// WriteToObject writes StorageGroup to a regular -// Object structure. 
Object must not contain ambiguous -// information about its expiration epoch or must not -// have it at all. -// -// Written information: -// - expiration epoch; -// - object type (TypeStorageGroup); -// - raw payload. -func WriteToObject(sg StorageGroup, o *objectSDK.Object) { - sgRaw, err := sg.Marshal() - if err != nil { - // Marshal() does not return errors - // in the next API release - panic(fmt.Errorf("could not marshal storage group: %w", err)) - } - - o.SetPayload(sgRaw) - o.SetType(objectSDK.TypeStorageGroup) - - attrs := o.Attributes() - var expAttrFound bool - - for i := range attrs { - if attrs[i].Key() == objectV2.SysAttributeExpEpoch { - expAttrFound = true - attrs[i].SetValue(strconv.FormatUint(sg.ExpirationEpoch(), 10)) - - break - } - } - - if !expAttrFound { - var attr objectSDK.Attribute - - attr.SetKey(objectV2.SysAttributeExpEpoch) - attr.SetValue(strconv.FormatUint(sg.ExpirationEpoch(), 10)) - - attrs = append(attrs, attr) - } - - o.SetAttributes(attrs...) -} +// // ReadFromObject reads StorageGroup from the NeoFS object. Object must contain +// // unambiguous information about its expiration epoch, otherwise behaviour is +// // undefined. +// // +// // Returns any error appeared during storage group parsing; returns error if +// // object is not of [object.TypeStorageGroup] type. 
+// func ReadFromObject(sg *StorageGroup, o object.Object) error { +// if typ := o.Type(); typ != object.TypeStorageGroup { +// return fmt.Errorf("object is not of StorageGroup type: %v", typ) +// } +// +// err := sg.Unmarshal(o.Payload()) +// if err != nil { +// return fmt.Errorf("could not unmarshal object: %w", err) +// } +// +// var expObj uint64 +// +// for _, attr := range o.Attributes() { +// if attr.Key() == object.AttributeExpirationEpoch { +// expObj, err = strconv.ParseUint(attr.Value(), 10, 64) +// if err != nil { +// return fmt.Errorf("could not get expiration from object: %w", err) +// } +// +// break +// } +// } +// +// // Supporting deprecated functionality. +// // See https://github.com/nspcc-dev/neofs-api/pull/205. +// if expObj != sg.exp { +// return fmt.Errorf( +// "expiration does not match: from object: %d, from payload: %d", +// expObj, sg.exp) +// } +// +// return nil +// } +// +// // WriteToObject writes StorageGroup to the NeoFS object. Object must not +// // contain ambiguous information about its expiration epoch or must not have it +// // at all. +// // +// // Written information: +// // - expiration epoch; +// // - object type ([object.TypeStorageGroup]); +// // - raw payload. +// func WriteToObject(sg StorageGroup, o *object.Object) { +// o.SetPayload(sg.Marshal()) +// o.SetType(object.TypeStorageGroup) +// +// // TODO: simplify object attribute setting like for container +// attrs := o.Attributes() +// var expAttrFound bool +// +// for i := range attrs { +// if attrs[i].Key() == object.AttributeExpirationEpoch { +// expAttrFound = true +// attrs[i].SetValue(strconv.FormatUint(sg.exp, 10)) +// +// break +// } +// } +// +// if !expAttrFound { +// var attr object.Attribute +// +// attr.SetKey(object.AttributeExpirationEpoch) +// attr.SetValue(strconv.FormatUint(sg.exp, 10)) +// +// attrs = append(attrs, attr) +// } +// +// o.SetAttributes(attrs...) 
+// } diff --git a/storagegroup/storagegroup_test.go b/storagegroup/storagegroup_test.go index 82b1c2352..35f0e1105 100644 --- a/storagegroup/storagegroup_test.go +++ b/storagegroup/storagegroup_test.go @@ -1,290 +1,258 @@ package storagegroup_test import ( - "crypto/sha256" - "strconv" + "fmt" + "math/rand" "testing" - objectV2 "github.com/nspcc-dev/neofs-api-go/v2/object" - "github.com/nspcc-dev/neofs-api-go/v2/refs" - storagegroupV2 "github.com/nspcc-dev/neofs-api-go/v2/storagegroup" - storagegroupV2test "github.com/nspcc-dev/neofs-api-go/v2/storagegroup/test" - "github.com/nspcc-dev/neofs-sdk-go/checksum" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + apistoragegroup "github.com/nspcc-dev/neofs-sdk-go/api/storagegroup" checksumtest "github.com/nspcc-dev/neofs-sdk-go/checksum/test" - objectSDK "github.com/nspcc-dev/neofs-sdk-go/object" - oid "github.com/nspcc-dev/neofs-sdk-go/object/id" oidtest "github.com/nspcc-dev/neofs-sdk-go/object/id/test" "github.com/nspcc-dev/neofs-sdk-go/storagegroup" - storagegrouptest "github.com/nspcc-dev/neofs-sdk-go/storagegroup/test" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" ) -func TestStorageGroup(t *testing.T) { - var sg storagegroup.StorageGroup - - sz := uint64(13) - sg.SetValidationDataSize(sz) - require.Equal(t, sz, sg.ValidationDataSize()) - - cs := checksumtest.Checksum() - sg.SetValidationDataHash(cs) - cs2, set := sg.ValidationDataHash() - - require.True(t, set) - require.Equal(t, cs, cs2) - - exp := uint64(33) - sg.SetExpirationEpoch(exp) - require.Equal(t, exp, sg.ExpirationEpoch()) - - members := []oid.ID{oidtest.ID(), oidtest.ID()} - sg.SetMembers(members) - require.Equal(t, members, sg.Members()) -} - -func TestStorageGroup_ReadFromV2(t *testing.T) { - t.Run("from zero", func(t *testing.T) { - var ( - x storagegroup.StorageGroup - v2 storagegroupV2.StorageGroup - ) - - require.Error(t, x.ReadFromV2(v2)) +func TestStorageGroupDecoding(t *testing.T) { + id := oidtest.ID() + 
fmt.Println(id[:]) + fmt.Println(id) + t.Run("invalid binary", func(t *testing.T) { + var sg storagegroup.StorageGroup + msg := []byte("definitely_not_protobuf") + err := sg.Unmarshal(msg) + require.ErrorContains(t, err, "decode protobuf") }) - - t.Run("from non-zero", func(t *testing.T) { - var ( - x storagegroup.StorageGroup - v2 = storagegroupV2test.GenerateStorageGroup(false) - ) - - // https://github.com/nspcc-dev/neofs-api-go/issues/394 - v2.SetMembers(generateOIDList()) - - size := v2.GetValidationDataSize() - // nolint:staticcheck - epoch := v2.GetExpirationEpoch() - mm := v2.GetMembers() - hashV2 := v2.GetValidationHash() - - require.NoError(t, x.ReadFromV2(*v2)) - - require.Equal(t, epoch, x.ExpirationEpoch()) - require.Equal(t, size, x.ValidationDataSize()) - - var hash checksum.Checksum - require.NoError(t, hash.ReadFromV2(*hashV2)) - h, set := x.ValidationDataHash() - require.True(t, set) - require.Equal(t, hash, h) - - var oidV2 refs.ObjectID - - for i, m := range mm { - x.Members()[i].WriteToV2(&oidV2) - require.Equal(t, m, oidV2) + t.Run("invalid fields", func(t *testing.T) { + for _, testCase := range []struct { + name, err string + corrupt func(*apistoragegroup.StorageGroup) + }{ + {name: "checksum/value/nil", err: "invalid hash: missing value", corrupt: func(sg *apistoragegroup.StorageGroup) { + sg.ValidationHash = new(refs.Checksum) + }}, + {name: "checksum/value/empty", err: "invalid hash: missing value", corrupt: func(sg *apistoragegroup.StorageGroup) { + sg.ValidationHash = &refs.Checksum{Sum: []byte{}} + }}, + {name: "members/nil", err: "invalid member #1: missing value field", corrupt: func(sg *apistoragegroup.StorageGroup) { + sg.Members = []*refs.ObjectID{ + {Value: make([]byte, 32)}, + nil, + } + }}, + {name: "members/value/nil", err: "invalid member #1: missing value field", corrupt: func(sg *apistoragegroup.StorageGroup) { + sg.Members = []*refs.ObjectID{ + {Value: make([]byte, 32)}, + {Value: nil}, + } + }}, + {name: 
"members/value/empty", err: "invalid member #1: missing value field", corrupt: func(sg *apistoragegroup.StorageGroup) { + sg.Members = []*refs.ObjectID{ + {Value: make([]byte, 32)}, + {Value: []byte{}}, + } + }}, + {name: "members/value/wrong length", err: "invalid member #1: invalid value length 31", corrupt: func(sg *apistoragegroup.StorageGroup) { + sg.Members = []*refs.ObjectID{ + {Value: make([]byte, 32)}, + {Value: make([]byte, 31)}, + } + }}, + {name: "members/duplicated", err: "duplicated member EMwQfxfrUrLwnYxZDRBCe8NwyThefNPni7s1QLQXWay7", corrupt: func(sg *apistoragegroup.StorageGroup) { + sg.Members = []*refs.ObjectID{ + {Value: []byte{198, 133, 16, 209, 121, 137, 128, 158, 158, 74, 248, 227, 131, 233, 166, 249, 7, 111, 24, 55, 189, 32, 76, 140, 146, 7, 123, 228, 49, 198, 58, 98}}, + {Value: make([]byte, 32)}, + {Value: []byte{198, 133, 16, 209, 121, 137, 128, 158, 158, 74, 248, 227, 131, 233, 166, 249, 7, 111, 24, 55, 189, 32, 76, 140, 146, 7, 123, 228, 49, 198, 58, 98}}, + } + }}, + } { + t.Run(testCase.name, func(t *testing.T) { + var src, dst storagegroup.StorageGroup + var m apistoragegroup.StorageGroup + + require.NoError(t, proto.Unmarshal(src.Marshal(), &m)) + testCase.corrupt(&m) + b, err := proto.Marshal(&m) + require.NoError(t, err) + require.ErrorContains(t, dst.Unmarshal(b), testCase.err) + }) } }) } -func TestStorageGroupEncoding(t *testing.T) { - sg := storagegrouptest.StorageGroup() - - t.Run("binary", func(t *testing.T) { - data, err := sg.Marshal() - require.NoError(t, err) - - var sg2 storagegroup.StorageGroup - require.NoError(t, sg2.Unmarshal(data)) - - require.Equal(t, sg, sg2) - }) - - t.Run("json", func(t *testing.T) { - data, err := sg.MarshalJSON() - require.NoError(t, err) - - var sg2 storagegroup.StorageGroup - require.NoError(t, sg2.UnmarshalJSON(data)) - - require.Equal(t, sg, sg2) - }) -} - -func TestStorageGroup_WriteToV2(t *testing.T) { - t.Run("zero to v2", func(t *testing.T) { - var ( - x storagegroup.StorageGroup - v2 
storagegroupV2.StorageGroup - ) - - x.WriteToV2(&v2) - - require.Nil(t, v2.GetValidationHash()) - require.Nil(t, v2.GetMembers()) - require.Zero(t, v2.GetValidationDataSize()) - // nolint:staticcheck - require.Zero(t, v2.GetExpirationEpoch()) - }) - - t.Run("non-zero to v2", func(t *testing.T) { - var ( - x = storagegrouptest.StorageGroup() - v2 storagegroupV2.StorageGroup - ) - - x.WriteToV2(&v2) - - // nolint:staticcheck - require.Equal(t, x.ExpirationEpoch(), v2.GetExpirationEpoch()) - require.Equal(t, x.ValidationDataSize(), v2.GetValidationDataSize()) +func TestStorageGroup_SetValidationDataSize(t *testing.T) { + var sg storagegroup.StorageGroup - var hash checksum.Checksum - require.NoError(t, hash.ReadFromV2(*v2.GetValidationHash())) + require.Zero(t, sg.ValidationDataSize()) - h, set := x.ValidationDataHash() - require.True(t, set) - require.Equal(t, h, hash) + val := rand.Uint64() + sg.SetValidationDataSize(val) + require.EqualValues(t, val, sg.ValidationDataSize()) - var oidV2 refs.ObjectID + otherVal := val + 1 + sg.SetValidationDataSize(otherVal) + require.EqualValues(t, otherVal, sg.ValidationDataSize()) - for i, m := range x.Members() { - m.WriteToV2(&oidV2) - require.Equal(t, oidV2, v2.GetMembers()[i]) - } - }) -} + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst storagegroup.StorageGroup -func TestNew(t *testing.T) { - t.Run("default values", func(t *testing.T) { - var sg storagegroup.StorageGroup + dst.SetValidationDataSize(val) + require.NoError(t, dst.Unmarshal(src.Marshal())) + require.Zero(t, dst.ValidationDataSize()) - // check initial values - require.Nil(t, sg.Members()) - _, set := sg.ValidationDataHash() - require.False(t, set) - require.Zero(t, sg.ExpirationEpoch()) - require.Zero(t, sg.ValidationDataSize()) + src.SetValidationDataSize(val) + require.NoError(t, dst.Unmarshal(src.Marshal())) + require.EqualValues(t, val, dst.ValidationDataSize()) + }) }) } -func generateOIDList() []refs.ObjectID 
{ - const size = 3 - - mmV2 := make([]refs.ObjectID, size) - for i := 0; i < size; i++ { - oidV2 := make([]byte, sha256.Size) - oidV2[i] = byte(i) - - mmV2[i].SetValue(oidV2) - } - - return mmV2 -} - -func TestStorageGroup_SetMembers_DoubleSetting(t *testing.T) { +func TestStorageGroup_SetValidationDataHash(t *testing.T) { var sg storagegroup.StorageGroup - mm := []oid.ID{oidtest.ID(), oidtest.ID(), oidtest.ID()} // cap is 3 at least - require.NotPanics(t, func() { - sg.SetMembers(mm) - }) - - require.NotPanics(t, func() { - // the previous cap is more that a new length; - // slicing should not lead to `out of range` - // and apply update correctly - sg.SetMembers(mm[:1]) - }) -} - -func TestStorageGroupFromObject(t *testing.T) { - sg := storagegrouptest.StorageGroup() - - var o objectSDK.Object - - var expAttr objectSDK.Attribute - expAttr.SetKey(objectV2.SysAttributeExpEpoch) - expAttr.SetValue(strconv.FormatUint(sg.ExpirationEpoch(), 10)) - - sgRaw, err := sg.Marshal() - require.NoError(t, err) - - o.SetPayload(sgRaw) - o.SetType(objectSDK.TypeStorageGroup) + require.Zero(t, sg.ValidationDataHash()) - t.Run("correct object", func(t *testing.T) { - o.SetAttributes(objectSDK.Attribute{}, expAttr, objectSDK.Attribute{}) + cs := checksumtest.Checksum() + sg.SetValidationDataHash(cs) + require.Equal(t, cs, sg.ValidationDataHash()) - var sg2 storagegroup.StorageGroup - require.NoError(t, storagegroup.ReadFromObject(&sg2, o)) - require.Equal(t, sg, sg2) - }) + csOther := checksumtest.Checksum() + sg.SetValidationDataHash(csOther) + require.Equal(t, csOther, sg.ValidationDataHash()) - t.Run("incorrect exp attr", func(t *testing.T) { - var sg2 storagegroup.StorageGroup + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst storagegroup.StorageGroup - expAttr.SetValue(strconv.FormatUint(sg.ExpirationEpoch()+1, 10)) - o.SetAttributes(expAttr) + dst.SetValidationDataHash(cs) + require.NoError(t, dst.Unmarshal(src.Marshal())) + 
require.Zero(t, dst.ValidationDataHash()) - require.Error(t, storagegroup.ReadFromObject(&sg2, o)) - }) - - t.Run("incorrect object type", func(t *testing.T) { - var sg2 storagegroup.StorageGroup - - o.SetType(objectSDK.TypeTombstone) - require.Error(t, storagegroup.ReadFromObject(&sg2, o)) + src.SetValidationDataHash(cs) + require.NoError(t, dst.Unmarshal(src.Marshal())) + require.Equal(t, cs, dst.ValidationDataHash()) + }) }) } -func TestStorageGroupToObject(t *testing.T) { - sg := storagegrouptest.StorageGroup() - - sgRaw, err := sg.Marshal() - require.NoError(t, err) - - t.Run("empty object", func(t *testing.T) { - var o objectSDK.Object - storagegroup.WriteToObject(sg, &o) - - exp, found := expFromObj(t, o) - require.True(t, found) - - require.Equal(t, sgRaw, o.Payload()) - require.Equal(t, sg.ExpirationEpoch(), exp) - require.Equal(t, objectSDK.TypeStorageGroup, o.Type()) - }) +func TestStorageGroup_SetMembers(t *testing.T) { + var sg storagegroup.StorageGroup - t.Run("obj already has exp attr", func(t *testing.T) { - var o objectSDK.Object + require.Zero(t, sg.Members()) - var attr objectSDK.Attribute - attr.SetKey(objectV2.SysAttributeExpEpoch) - attr.SetValue(strconv.FormatUint(sg.ExpirationEpoch()+1, 10)) + members := oidtest.NIDs(3) + sg.SetMembers(members) + require.Equal(t, members, sg.Members()) - o.SetAttributes(objectSDK.Attribute{}, attr, objectSDK.Attribute{}) + otherMembers := oidtest.NIDs(2) + sg.SetMembers(otherMembers) + require.Equal(t, otherMembers, sg.Members()) - storagegroup.WriteToObject(sg, &o) + t.Run("encoding", func(t *testing.T) { + t.Run("binary", func(t *testing.T) { + var src, dst storagegroup.StorageGroup - exp, found := expFromObj(t, o) - require.True(t, found) + dst.SetMembers(members) + require.NoError(t, dst.Unmarshal(src.Marshal())) + require.Zero(t, dst.Members()) - require.Equal(t, sgRaw, o.Payload()) - require.Equal(t, sg.ExpirationEpoch(), exp) - require.Equal(t, objectSDK.TypeStorageGroup, o.Type()) + 
src.SetMembers(members) + require.NoError(t, dst.Unmarshal(src.Marshal())) + require.Equal(t, members, dst.Members()) + }) }) } -func expFromObj(t *testing.T, o objectSDK.Object) (uint64, bool) { - for _, attr := range o.Attributes() { - if attr.Key() == objectV2.SysAttributeExpEpoch { - exp, err := strconv.ParseUint(attr.Value(), 10, 64) - require.NoError(t, err) - - return exp, true - } - } - - return 0, false -} +// TODO: +// func TestStorageGroupFromObject(t *testing.T) { +// sg := storagegrouptest.StorageGroup() +// +// var o objectSDK.Object +// +// var expAttr objectSDK.Attribute +// expAttr.SetKey(objectV2.SysAttributeExpEpoch) +// expAttr.SetValue(strconv.FormatUint(sg.ExpirationEpoch(), 10)) +// +// sgRaw, err := sg.Marshal() +// require.NoError(t, err) +// +// o.SetPayload(sgRaw) +// o.SetType(objectSDK.TypeStorageGroup) +// +// t.Run("correct object", func(t *testing.T) { +// o.SetAttributes(objectSDK.Attribute{}, expAttr, objectSDK.Attribute{}) +// +// var sg2 storagegroup.StorageGroup +// require.NoError(t, storagegroup.ReadFromObject(&sg2, o)) +// require.Equal(t, sg, sg2) +// }) +// +// t.Run("incorrect exp attr", func(t *testing.T) { +// var sg2 storagegroup.StorageGroup +// +// expAttr.SetValue(strconv.FormatUint(sg.ExpirationEpoch()+1, 10)) +// o.SetAttributes(expAttr) +// +// require.Error(t, storagegroup.ReadFromObject(&sg2, o)) +// }) +// +// t.Run("incorrect object type", func(t *testing.T) { +// var sg2 storagegroup.StorageGroup +// +// o.SetType(objectSDK.TypeTombstone) +// require.Error(t, storagegroup.ReadFromObject(&sg2, o)) +// }) +// } +// +// func TestStorageGroupToObject(t *testing.T) { +// sg := storagegrouptest.StorageGroup() +// +// sgRaw, err := sg.Marshal() +// require.NoError(t, err) +// +// t.Run("empty object", func(t *testing.T) { +// var o objectSDK.Object +// storagegroup.WriteToObject(sg, &o) +// +// exp, found := expFromObj(t, o) +// require.True(t, found) +// +// require.Equal(t, sgRaw, o.Payload()) +// require.Equal(t, 
sg.ExpirationEpoch(), exp) +// require.Equal(t, objectSDK.TypeStorageGroup, o.Type()) +// }) +// +// t.Run("obj already has exp attr", func(t *testing.T) { +// var o objectSDK.Object +// +// var attr objectSDK.Attribute +// attr.SetKey(objectV2.SysAttributeExpEpoch) +// attr.SetValue(strconv.FormatUint(sg.ExpirationEpoch()+1, 10)) +// +// o.SetAttributes(objectSDK.Attribute{}, attr, objectSDK.Attribute{}) +// +// storagegroup.WriteToObject(sg, &o) +// +// exp, found := expFromObj(t, o) +// require.True(t, found) +// +// require.Equal(t, sgRaw, o.Payload()) +// require.Equal(t, sg.ExpirationEpoch(), exp) +// require.Equal(t, objectSDK.TypeStorageGroup, o.Type()) +// }) +// } +// +// func expFromObj(t *testing.T, o objectSDK.Object) (uint64, bool) { +// for _, attr := range o.Attributes() { +// if attr.Key() == objectV2.SysAttributeExpEpoch { +// exp, err := strconv.ParseUint(attr.Value(), 10, 64) +// require.NoError(t, err) +// +// return exp, true +// } +// } +// +// return 0, false +// } diff --git a/storagegroup/test/generate.go b/storagegroup/test/generate.go index ef1195540..59b7399b7 100644 --- a/storagegroup/test/generate.go +++ b/storagegroup/test/generate.go @@ -1,8 +1,9 @@ package storagegrouptest import ( + "math/rand" + checksumtest "github.com/nspcc-dev/neofs-sdk-go/checksum/test" - oid "github.com/nspcc-dev/neofs-sdk-go/object/id" oidtest "github.com/nspcc-dev/neofs-sdk-go/object/id/test" "github.com/nspcc-dev/neofs-sdk-go/storagegroup" ) @@ -10,11 +11,9 @@ import ( // StorageGroup returns random storagegroup.StorageGroup. 
func StorageGroup() storagegroup.StorageGroup { var x storagegroup.StorageGroup - - x.SetExpirationEpoch(66) - x.SetValidationDataSize(322) + x.SetValidationDataSize(rand.Uint64()) x.SetValidationDataHash(checksumtest.Checksum()) - x.SetMembers([]oid.ID{oidtest.ID(), oidtest.ID()}) + x.SetMembers(oidtest.NIDs(1 + rand.Int()%3)) return x } diff --git a/storagegroup/test/generate_test.go b/storagegroup/test/generate_test.go new file mode 100644 index 000000000..e95a6f285 --- /dev/null +++ b/storagegroup/test/generate_test.go @@ -0,0 +1,18 @@ +package storagegrouptest_test + +import ( + "testing" + + "github.com/nspcc-dev/neofs-sdk-go/storagegroup" + storagegrouptest "github.com/nspcc-dev/neofs-sdk-go/storagegroup/test" + "github.com/stretchr/testify/require" +) + +func TestStorageGroup(t *testing.T) { + sg := storagegrouptest.StorageGroup() + require.NotEqual(t, sg, storagegrouptest.StorageGroup()) + + var sg2 storagegroup.StorageGroup + require.NoError(t, sg2.Unmarshal(sg.Marshal())) + require.Equal(t, sg, sg2) +} diff --git a/user/example_test.go b/user/example_test.go deleted file mode 100644 index 246407af5..000000000 --- a/user/example_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package user_test - -import ( - "github.com/nspcc-dev/neo-go/pkg/util" - apiGoRefs "github.com/nspcc-dev/neofs-api-go/v2/refs" - "github.com/nspcc-dev/neofs-sdk-go/user" -) - -// NeoFS user identification is compatible with Neo accounts. -func ExampleID_SetScriptHash() { - // import "github.com/nspcc-dev/neo-go/pkg/util" - var id user.ID - - var scriptHash util.Uint160 // user account in NeoFS - id.SetScriptHash(scriptHash) -} - -// ID is compatible with the NeoFS Smart Contract API. -func ExampleID_WalletBytes() { - var id user.ID - // ... - - wallet := id.WalletBytes() - _ = wallet - - // use wallet in call -} - -// Encoding mechanisms are used to transfer identifiers on client. -func ExampleID_EncodeToString() { - var id user.ID - // ... 
- - _ = id.EncodeToString() -} - -// Encoding mechanisms are used to transfer identifiers on server. -func ExampleID_DecodeString() { - var id user.ID - // ... - - var s string - _ = id.DecodeString(s) -} - -// Instances can be also used to process NeoFS API V2 protocol messages with [https://github.com/nspcc-dev/neofs-api] package. -func ExampleID_marshalling() { - // import apiGoRefs "github.com/nspcc-dev/neofs-api-go/v2/refs" - - // On the client side. - - var id user.ID - var msg apiGoRefs.OwnerID - id.WriteToV2(&msg) - // *send message* - - // On the server side. - - _ = id.ReadFromV2(msg) -} diff --git a/user/id.go b/user/id.go index f706e0ea7..e7f7d44fe 100644 --- a/user/id.go +++ b/user/id.go @@ -9,108 +9,116 @@ import ( "github.com/nspcc-dev/neo-go/pkg/crypto/hash" "github.com/nspcc-dev/neo-go/pkg/encoding/address" "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neofs-api-go/v2/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" ) +// IDSize is a size of NeoFS user ID in bytes. +const IDSize = 25 + // ID identifies users of the NeoFS system. // -// ID is mutually compatible with github.com/nspcc-dev/neofs-api-go/v2/refs.OwnerID -// message. See ReadFromV2 / WriteToV2 methods. +// ID implements built-in comparable interface. +// +// ID is mutually compatible with [refs.OwnerID] message. See [ID.ReadFromV2] / +// [ID.WriteToV2] methods. // -// Instances can be created using built-in var declaration. Zero ID is not valid, -// so it MUST be initialized using some modifying function (e.g. SetScriptHash, etc.). -type ID struct { - w []byte +// Instances can be created using built-in var declaration. Zero ID is not +// valid, so it MUST be initialized using [NewID] or +// [ResolveFromECDSAPublicKey]. +type ID [IDSize]byte + +// NewID returns the user ID for his wallet address scripthash. 
+func NewID(scriptHash util.Uint160) ID { + var id ID + id[0] = address.Prefix + copy(id[1:], scriptHash.BytesBE()) + copy(id[21:], hash.Checksum(id[:21])) + return id } -// ReadFromV2 reads ID from the refs.OwnerID message. Returns an error if -// the message is malformed according to the NeoFS API V2 protocol. -// -// See also WriteToV2. -func (x *ID) ReadFromV2(m refs.OwnerID) error { - w := m.GetValue() - if len(w) != 25 { - return fmt.Errorf("invalid length %d, expected 25", len(w)) +func (x *ID) decodeBinary(b []byte) error { + if len(b) != IDSize { + return fmt.Errorf("invalid value length %d", len(b)) } - if w[0] != address.NEO3Prefix { - return fmt.Errorf("invalid prefix byte 0x%X, expected 0x%X", w[0], address.NEO3Prefix) + if b[0] != address.NEO3Prefix { + return fmt.Errorf("invalid prefix byte 0x%X, expected 0x%X", b[0], address.NEO3Prefix) } - if !bytes.Equal(w[21:], hash.Checksum(w[:21])) { - return errors.New("checksum mismatch") + if !bytes.Equal(b[21:], hash.Checksum(b[:21])) { + return errors.New("value checksum mismatch") } - x.w = w + copy(x[:], b) return nil } -// WriteToV2 writes ID to the refs.OwnerID message. -// The message must not be nil. +// ReadFromV2 reads ID from the [refs.OwnerID] message. Returns an error if the +// message is malformed according to the NeoFS API V2 protocol. The message must +// not be nil. // -// See also ReadFromV2. -func (x ID) WriteToV2(m *refs.OwnerID) { - m.SetValue(x.w) -} - -// SetScriptHash forms user ID from wallet address scripthash. -func (x *ID) SetScriptHash(scriptHash util.Uint160) { - if cap(x.w) < 25 { - x.w = make([]byte, 25) - } else if len(x.w) < 25 { - x.w = x.w[:25] +// ReadFromV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. +// +// See also [ID.WriteToV2]. 
+func (x *ID) ReadFromV2(m *refs.OwnerID) error { + if len(m.Value) == 0 { + return errors.New("missing value field") } - - x.w[0] = address.Prefix - copy(x.w[1:], scriptHash.BytesBE()) - copy(x.w[21:], hash.Checksum(x.w[:21])) + return x.decodeBinary(m.Value) } -// WalletBytes returns NeoFS user ID as Neo3 wallet address in a binary format. +// WriteToV2 writes ID to the [refs.OwnerID] message of the NeoFS API protocol. // -// The value returned shares memory with the structure itself, so changing it can lead to data corruption. -// Make a copy if you need to change it. +// WriteToV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. // -// See also Neo3 wallet docs. -func (x ID) WalletBytes() []byte { - return x.w +// See also [ID.ReadFromV2]. +func (x ID) WriteToV2(m *refs.OwnerID) { + m.Value = x[:] } // EncodeToString encodes ID into NeoFS API V2 protocol string. // -// See also DecodeString. +// Zero ID is base58 encoding of [IDSize] zeros. +// +// See also [ID.DecodeString]. func (x ID) EncodeToString() string { - return base58.Encode(x.w) + return base58.Encode(x[:]) } -// DecodeString decodes NeoFS API V2 protocol string. Returns an error -// if s is malformed. +// DecodeString decodes string into ID according to NeoFS API protocol. Returns +// an error if s is malformed. // -// DecodeString always changes the ID. -// -// See also EncodeToString. +// See also [ID.EncodeToString]. func (x *ID) DecodeString(s string) error { - var err error - - x.w, err = base58.Decode(s) - if err != nil { - return fmt.Errorf("decode base58: %w", err) + var b []byte + if s != "" { + var err error + b, err = base58.Decode(s) + if err != nil { + return fmt.Errorf("decode base58: %w", err) + } } - - return nil + return x.decodeBinary(b) } -// String implements fmt.Stringer. +// String implements [fmt.Stringer]. 
// // String is designed to be human-readable, and its format MAY differ between -// SDK versions. String MAY return same result as EncodeToString. String MUST NOT -// be used to encode ID into NeoFS protocol string. +// SDK versions. String MAY return same result as [ID.EncodeToString]. String +// MUST NOT be used to encode ID into NeoFS protocol string. func (x ID) String() string { return x.EncodeToString() } -// Equals defines a comparison relation between two ID instances. -func (x ID) Equals(x2 ID) bool { - return bytes.Equal(x.w, x2.w) +// IsZero checks whether ID is zero. +func (x ID) IsZero() bool { + for i := range x { + if x[i] != 0 { + return false + } + } + return true } diff --git a/user/id_test.go b/user/id_test.go index 97db311cc..7d00b3add 100644 --- a/user/id_test.go +++ b/user/id_test.go @@ -1,123 +1,150 @@ package user_test import ( - "bytes" "math/rand" "testing" - "github.com/mr-tron/base58" + "github.com/nspcc-dev/neo-go/pkg/crypto/hash" "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neofs-api-go/v2/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" "github.com/nspcc-dev/neofs-sdk-go/user" usertest "github.com/nspcc-dev/neofs-sdk-go/user/test" "github.com/stretchr/testify/require" ) -func TestID_WalletBytes(t *testing.T) { - var scriptHash util.Uint160 - //nolint:staticcheck - rand.Read(scriptHash[:]) - - var id user.ID - id.SetScriptHash(scriptHash) - - w := id.WalletBytes() - - var m refs.OwnerID - m.SetValue(w) - - err := id.ReadFromV2(m) - require.NoError(t, err) +func TestIDComparable(t *testing.T) { + id1 := usertest.ID() + require.True(t, id1 == id1) + id2 := usertest.ChangeID(id1) + require.NotEqual(t, id1, id2) + require.False(t, id1 == id2) } -func TestID_SetScriptHash(t *testing.T) { - var scriptHash util.Uint160 - //nolint:staticcheck - rand.Read(scriptHash[:]) - - var id user.ID - id.SetScriptHash(scriptHash) - - var m refs.OwnerID - id.WriteToV2(&m) - - var id2 user.ID - - err := id2.ReadFromV2(m) - 
require.NoError(t, err) - - require.True(t, id2.Equals(id)) +func TestID_String(t *testing.T) { + id1 := usertest.ID() + id2 := usertest.ChangeID(id1) + require.NotEmpty(t, id1.String()) + require.Equal(t, id1.String(), id1.String()) + require.NotEqual(t, id1.String(), id2.String()) } -func TestV2_ID(t *testing.T) { - id := usertest.ID(t) - var m refs.OwnerID +func TestNewID(t *testing.T) { + var h util.Uint160 + rand.Read(h[:]) + + id := user.NewID(h) + require.EqualValues(t, 0x35, id[0]) + require.Equal(t, h[:], id[1:21]) + require.Equal(t, hash.Checksum(append([]byte{0x35}, h[:]...)), id[21:]) + + var msg refs.OwnerID + id.WriteToV2(&msg) + b := []byte{0x35} + b = append(b, h[:]...) + b = append(b, hash.Checksum(b)...) + require.Equal(t, b, msg.Value) var id2 user.ID + require.NoError(t, id2.ReadFromV2(&msg)) + require.Equal(t, id2, id) - t.Run("OK", func(t *testing.T) { - id.WriteToV2(&m) + var id3 user.ID + require.NoError(t, id3.DecodeString(id.EncodeToString())) + require.Equal(t, id3, id) +} - err := id2.ReadFromV2(m) - require.NoError(t, err) - require.True(t, id2.Equals(id)) +func TestID_ReadFromV2(t *testing.T) { + t.Run("missing fields", func(t *testing.T) { + t.Run("value", func(t *testing.T) { + id := usertest.ID() + var m refs.OwnerID + + id.WriteToV2(&m) + m.Value = nil + require.ErrorContains(t, id.ReadFromV2(&m), "missing value field") + m.Value = []byte{} + require.ErrorContains(t, id.ReadFromV2(&m), "missing value field") + }) }) - - val := m.GetValue() - - t.Run("invalid size", func(t *testing.T) { - m.SetValue(val[:24]) - - err := id2.ReadFromV2(m) - require.Error(t, err) + t.Run("invalid fields", func(t *testing.T) { + t.Run("value", func(t *testing.T) { + id := usertest.ID() + var m refs.OwnerID + + id.WriteToV2(&m) + m.Value = make([]byte, 24) + require.ErrorContains(t, id.ReadFromV2(&m), "invalid value length 24") + m.Value = make([]byte, 26) + require.ErrorContains(t, id.ReadFromV2(&m), "invalid value length 26") + m.Value = make([]byte, 
25) + m.Value[0] = 0x34 + copy(m.Value[21:], hash.Checksum(m.Value[:21])) + require.ErrorContains(t, id.ReadFromV2(&m), "invalid prefix byte 0x34, expected 0x35") + m.Value[0] = 0x35 // checksum become broken + require.ErrorContains(t, id.ReadFromV2(&m), "value checksum mismatch") + }) }) +} - t.Run("invalid prefix", func(t *testing.T) { - val := bytes.Clone(val) - val[0]++ - - m.SetValue(val) +func TestID_DecodeString(t *testing.T) { + var id user.ID - err := id2.ReadFromV2(m) - require.Error(t, err) + const zeroIDString = "1111111111111111111111111" + require.Equal(t, zeroIDString, id.EncodeToString()) + id = usertest.ChangeID(id) + require.Error(t, id.DecodeString(zeroIDString)) + + var bin = [25]byte{53, 72, 207, 149, 237, 209, 4, 50, 202, 244, 5, 17, 110, 81, 232, 216, 209, 218, 182, 113, 105, 9, 34, 73, 84} + const str = "NSYxWsYXboEjX31dMqVSC9aUTJnCaSP4v7" + require.NoError(t, id.DecodeString(str)) + require.Equal(t, str, id.EncodeToString()) + require.EqualValues(t, bin, id) + + var binOther = [25]byte{53, 3, 151, 68, 134, 6, 234, 16, 104, 195, 133, 153, 6, 87, 28, 4, 41, 45, 67, 87, 121, 135, 107, 199, 252} + const strOther = "NLExRbNWNQpc6pEyBJXrFaPqcXMFFHChQo" + require.NoError(t, id.DecodeString(strOther)) + require.Equal(t, strOther, id.EncodeToString()) + require.EqualValues(t, binOther, id) + + t.Run("invalid", func(t *testing.T) { + var id user.ID + for _, testCase := range []struct{ input, err string }{ + {input: "not_a_base58_string", err: "decode base58"}, + {input: "", err: "invalid value length 0"}, + {input: "MzB7cw27FZpBdcLiexQN6DSgriAa9WERdM", err: "invalid prefix byte 0x34, expected 0x35"}, + {input: "NgkmJY4DsqYsomn5y1TKz4GoBHmW55ZrwC", err: "value checksum mismatch"}, + } { + require.ErrorContains(t, id.DecodeString(testCase.input), testCase.err, testCase) + } }) - - t.Run("invalid checksum", func(t *testing.T) { - val := bytes.Clone(val) - val[21]++ - - m.SetValue(val) - - err := id2.ReadFromV2(m) - require.Error(t, err) + 
t.Run("encoding", func(t *testing.T) { + t.Run("api", func(t *testing.T) { + var src, dst user.ID + var msg refs.OwnerID + + require.NoError(t, dst.DecodeString(str)) + + src.WriteToV2(&msg) + require.Equal(t, make([]byte, 25), msg.Value) + require.Error(t, dst.ReadFromV2(&msg)) + + require.NoError(t, src.DecodeString(str)) + + src.WriteToV2(&msg) + require.Equal(t, bin[:], msg.Value) + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + require.EqualValues(t, bin, dst) + require.Equal(t, str, dst.EncodeToString()) + }) }) } -func TestID_EncodeToString(t *testing.T) { - id := usertest.ID(t) - - s := id.EncodeToString() - - _, err := base58.Decode(s) - require.NoError(t, err) - - var id2 user.ID - - err = id2.DecodeString(s) - require.NoError(t, err) - - require.Equal(t, id, id2) - - err = id2.DecodeString("_") // any invalid bas58 string - require.Error(t, err) -} - -func TestID_Equal(t *testing.T) { - id1 := usertest.ID(t) - id2 := usertest.ID(t) - id3 := id1 - - require.True(t, id1.Equals(id1)) // self-equality - require.True(t, id1.Equals(id3)) - require.True(t, id3.Equals(id1)) // commutativity - require.False(t, id1.Equals(id2)) +func TestID_IsZero(t *testing.T) { + var id user.ID + require.True(t, id.IsZero()) + for i := range id { + id2 := id + id2[i]++ + require.False(t, id2.IsZero()) + } } diff --git a/user/signer.go b/user/signer.go index 27b934bdc..d54aa189f 100644 --- a/user/signer.go +++ b/user/signer.go @@ -36,10 +36,7 @@ func NewSigner(s neofscrypto.Signer, usr ID) Signer { } func newAutoResolvedSigner(s neofscrypto.Signer, pubKey ecdsa.PublicKey) Signer { - var id ID - id.SetScriptHash((*keys.PublicKey)(&pubKey).GetScriptHash()) - - return NewSigner(s, id) + return NewSigner(s, ResolveFromECDSAPublicKey(pubKey)) } // NewAutoIDSigner returns [Signer] with neofscrypto.ECDSA_SHA512 @@ -59,8 +56,5 @@ func NewAutoIDSignerRFC6979(key ecdsa.PrivateKey) Signer { // ResolveFromECDSAPublicKey resolves [ID] from the given [ecdsa.PublicKey]. 
func ResolveFromECDSAPublicKey(pk ecdsa.PublicKey) ID { - var id ID - id.SetScriptHash((*keys.PublicKey)(&pk).GetScriptHash()) - - return id + return NewID((*keys.PublicKey)(&pk).GetScriptHash()) } diff --git a/user/test/id.go b/user/test/id.go index 97566775d..df6b4df9e 100644 --- a/user/test/id.go +++ b/user/test/id.go @@ -1,17 +1,109 @@ package usertest import ( - "testing" + "crypto/ecdsa" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" - "github.com/nspcc-dev/neofs-sdk-go/crypto/test" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/nspcc-dev/neo-go/pkg/util" + neofsecdsa "github.com/nspcc-dev/neofs-sdk-go/crypto/ecdsa" "github.com/nspcc-dev/neofs-sdk-go/user" ) // ID returns random user.ID. -func ID(tb testing.TB) user.ID { - var x user.ID - s := test.RandomSignerRFC6979(tb) - x = s.UserID() +func ID() user.ID { + var h util.Uint160 + rand.Read(h[:]) + return user.NewID(h) +} + +// ChangeID returns user ID other than the given one. +func ChangeID(id user.ID) user.ID { + id[0]++ + return id +} + +// NIDs returns n random user.ID instances. +func NIDs(n int) []user.ID { + res := make([]user.ID, n) + for i := range res { + res[i] = ID() + } + return res +} + +// User represents NeoFS user credentials. +type User struct { + user.Signer + SignerRFC6979, SignerWalletConnect user.Signer + + ID user.ID + PrivateKey ecdsa.PrivateKey + PublicKeyBytes []byte +} + +// TwoUsers returns two packs of different static user ECDSA credentials. 
+func TwoUsers() (User, User) { + const strUser1 = "NPo3isCPDA6S7EVfpATq1NaBkzVhxWX9FQ" + const hexPrivKey1 = "ebd6154e7e1b85050647a480ee3a97355a6ea7fe6e80ce7b4c27dbf00d599a2a" + const hexPubKey1 = "03ffb26e9e499ae96024730b5b73d32182d97a46a51a68ffcf0a5db6b56a67057f" + const strUser2 = "NZrDLV77VcxTCWhpjR2DuD8zg5iyn8rYtW" + const hexPrivKey2 = "23d2ba98afa05c06dc9186efc40c568b0333c33f790865ef68e7aa20cde5354d" + const hexPubKey2 = "021e6c4449951ff3170a92ba9d9af88facceedf1dfcb42d0d2afa0e72b3d67c372" + + k1, err := keys.NewPrivateKeyFromHex(hexPrivKey1) + if err != nil { + panic(fmt.Errorf("unexpected decode private key failure: %w", err)) + } + k2, err := keys.NewPrivateKeyFromHex(hexPrivKey2) + if err != nil { + panic(fmt.Errorf("unexpected decode private key failure: %w", err)) + } + + bPubKey1, err := hex.DecodeString(hexPubKey1) + if err != nil { + panic(fmt.Errorf("unexpected decode HEX failure: %w", err)) + } + bPubKey2, err := hex.DecodeString(hexPubKey2) + if err != nil { + panic(fmt.Errorf("unexpected decode HEX failure: %w", err)) + } + + var usr1, usr2 user.ID + if err = usr1.DecodeString(strUser1); err != nil { + panic(fmt.Errorf("unexpected decode string user ID failure: %w", err)) + } + if err = usr2.DecodeString(strUser2); err != nil { + panic(fmt.Errorf("unexpected decode string user ID failure: %w", err)) + } + + return User{ + ID: user.ResolveFromECDSAPublicKey(k1.PrivateKey.PublicKey), + PrivateKey: k1.PrivateKey, + PublicKeyBytes: bPubKey1, + Signer: user.NewAutoIDSigner(k1.PrivateKey), + SignerRFC6979: user.NewAutoIDSignerRFC6979(k1.PrivateKey), + SignerWalletConnect: user.NewSigner(neofsecdsa.SignerWalletConnect(k1.PrivateKey), usr1), + }, User{ + ID: user.ResolveFromECDSAPublicKey(k2.PrivateKey.PublicKey), + PrivateKey: k2.PrivateKey, + PublicKeyBytes: bPubKey2, + Signer: user.NewAutoIDSigner(k2.PrivateKey), + SignerRFC6979: user.NewAutoIDSignerRFC6979(k2.PrivateKey), + SignerWalletConnect: 
user.NewSigner(neofsecdsa.SignerWalletConnect(k2.PrivateKey), usr2),
+	}
+}
+
+type failedSigner struct {
+	user.Signer
+}
+
+func (x failedSigner) Sign([]byte) ([]byte, error) { return nil, errors.New("failed to sign") }
 
-	return x
+// FailSigner wraps s to always return an error from the Sign method.
+func FailSigner(s user.Signer) user.Signer {
+	return failedSigner{s}
 }
diff --git a/user/test/id_test.go b/user/test/id_test.go
new file mode 100644
index 000000000..d538ad21f
--- /dev/null
+++ b/user/test/id_test.go
@@ -0,0 +1,60 @@
+package usertest_test
+
+import (
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"math/rand"
+	"testing"
+
+	"github.com/nspcc-dev/neofs-sdk-go/api/refs"
+	"github.com/nspcc-dev/neofs-sdk-go/user"
+	usertest "github.com/nspcc-dev/neofs-sdk-go/user/test"
+	"github.com/stretchr/testify/require"
+)
+
+func TestID(t *testing.T) {
+	id := usertest.ID()
+	require.NotEqual(t, id, usertest.ID())
+
+	var m refs.OwnerID
+	id.WriteToV2(&m)
+	var id2 user.ID
+	require.NoError(t, id2.ReadFromV2(&m))
+}
+
+func TestChangeID(t *testing.T) {
+	id := usertest.ID()
+	require.NotEqual(t, id, usertest.ChangeID(id))
+}
+
+func TestNIDs(t *testing.T) {
+	n := rand.Int() % 10
+	require.Len(t, usertest.NIDs(n), n)
+}
+
+func TestTwoUsers(t *testing.T) {
+	usr1, usr2 := usertest.TwoUsers()
+	require.NotEqual(t, usr1.UserID(), usr2.UserID())
+	require.NotEqual(t, usr1.PrivateKey, usr2.PrivateKey)
+	require.NotEqual(t, usr1.PublicKeyBytes, usr2.PublicKeyBytes)
+	require.NotEqual(t, usr1.Signer, usr2.Signer)
+	require.NotEqual(t, usr1.SignerRFC6979, usr2.SignerRFC6979)
+	require.NotEqual(t, usr1.SignerWalletConnect, usr2.SignerWalletConnect)
+
+	var pubKey1, pubKey2 ecdsa.PublicKey
+	pubKey1.Curve = elliptic.P256()
+	pubKey1.X, pubKey1.Y = elliptic.UnmarshalCompressed(pubKey1.Curve, usr1.PublicKeyBytes)
+	require.NotNil(t, pubKey1.X)
+	require.Equal(t, usr1.PrivateKey.PublicKey, pubKey1)
+	pubKey2.Curve = 
elliptic.P256() + pubKey2.X, pubKey2.Y = elliptic.UnmarshalCompressed(pubKey2.Curve, usr2.PublicKeyBytes) + require.NotNil(t, pubKey2.X) + require.Equal(t, usr2.PrivateKey.PublicKey, pubKey2) + require.Equal(t, usr1.UserID(), user.ResolveFromECDSAPublicKey(pubKey1)) + require.Equal(t, usr1.UserID(), usr1.UserID()) + require.Equal(t, usr1.UserID(), usr1.SignerRFC6979.UserID()) + require.Equal(t, usr1.UserID(), usr1.SignerWalletConnect.UserID()) + require.Equal(t, usr2.UserID(), user.ResolveFromECDSAPublicKey(pubKey2)) + require.Equal(t, usr2.UserID(), usr2.SignerRFC6979.UserID()) + require.Equal(t, usr2.UserID(), usr2.SignerWalletConnect.UserID()) +} diff --git a/version/test/generate_test.go b/version/test/generate_test.go new file mode 100644 index 000000000..385076af3 --- /dev/null +++ b/version/test/generate_test.go @@ -0,0 +1,20 @@ +package versiontest_test + +import ( + "testing" + + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + "github.com/nspcc-dev/neofs-sdk-go/version" + versiontest "github.com/nspcc-dev/neofs-sdk-go/version/test" + "github.com/stretchr/testify/require" +) + +func TestVersion(t *testing.T) { + v := versiontest.Version() + require.NotEqual(t, v, versiontest.Version()) + + var m refs.Version + v.WriteToV2(&m) + var v2 version.Version + require.NoError(t, v2.ReadFromV2(&m)) +} diff --git a/version/version.go b/version/version.go index ac9e4b58a..88df547a6 100644 --- a/version/version.go +++ b/version/version.go @@ -3,69 +3,69 @@ package version import ( "fmt" - "github.com/nspcc-dev/neofs-api-go/v2/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" ) // Version represents revision number in SemVer scheme. // -// Version is mutually compatible with github.com/nspcc-dev/neofs-api-go/v2/refs.Version -// message. See ReadFromV2 / WriteToV2 methods. +// Version implements built-in comparable interface. // -// Instances can be created using built-in var declaration. 
-// -// Note that direct typecast is not safe and may result in loss of compatibility: +// Version is mutually compatible with [refs.Version] message. See +// [Version.ReadFromV2] / [Version.WriteToV2] methods. // -// _ = Version(refs.Version{}) // not recommended -type Version refs.Version +// Instances can be created using built-in var declaration. +type Version struct{ mjr, mnr uint32 } const sdkMjr, sdkMnr = 2, 16 -// Current returns Version instance that initialized to the -// latest supported NeoFS API revision number in SDK. -func Current() (v Version) { - v.SetMajor(sdkMjr) - v.SetMinor(sdkMnr) - return v -} +// Current is the latest NeoFS API protocol version supported by this library. +var Current = Version{sdkMjr, sdkMnr} // Major returns major number of the revision. -func (v *Version) Major() uint32 { - return (*refs.Version)(v).GetMajor() +func (v Version) Major() uint32 { + return v.mjr } // SetMajor sets major number of the revision. func (v *Version) SetMajor(val uint32) { - (*refs.Version)(v).SetMajor(val) + v.mjr = val } // Minor returns minor number of the revision. -func (v *Version) Minor() uint32 { - return (*refs.Version)(v).GetMinor() +func (v Version) Minor() uint32 { + return v.mnr } // SetMinor sets minor number of the revision. func (v *Version) SetMinor(val uint32) { - (*refs.Version)(v).SetMinor(val) + v.mnr = val } -// WriteToV2 writes Version to the refs.Version message. -// The message must not be nil. +// WriteToV2 writes Version to the [refs.Version] message of the NeoFS API +// protocol. +// +// WriteToV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. // -// See also ReadFromV2. +// See also [Version.ReadFromV2]. func (v Version) WriteToV2(m *refs.Version) { - *m = (refs.Version)(v) + m.Major, m.Minor = v.mjr, v.mnr } -// ReadFromV2 reads Version from the refs.Version message. Checks if the message -// conforms to NeoFS API V2 protocol. 
+// ReadFromV2 reads Version from the [refs.Version] message. Returns an error if +// the message is malformed according to the NeoFS API V2 protocol. The message +// must not be nil. // -// See also WriteToV2. -func (v *Version) ReadFromV2(m refs.Version) error { - *v = Version(m) +// ReadFromV2 is intended to be used by the NeoFS API V2 client/server +// implementation only and is not expected to be directly used by applications. +// +// See also [Version.WriteToV2]. +func (v *Version) ReadFromV2(m *refs.Version) error { + v.mjr, v.mnr = m.Major, m.Minor return nil } -// String implements fmt.Stringer. +// String implements [fmt.Stringer]. // // String is designed to be human-readable, and its format MAY differ between // SDK versions. @@ -78,35 +78,3 @@ func (v Version) String() string { func EncodeToString(v Version) string { return fmt.Sprintf("v%d.%d", v.Major(), v.Minor()) } - -// Equal returns true if versions are identical. -func (v Version) Equal(v2 Version) bool { - return v.Major() == v2.Major() && - v.Minor() == v2.Minor() -} - -// MarshalJSON encodes Version into a JSON format of the NeoFS API -// protocol (Protocol Buffers JSON). -// -// See also UnmarshalJSON. -func (v Version) MarshalJSON() ([]byte, error) { - var m refs.Version - v.WriteToV2(&m) - - return m.MarshalJSON() -} - -// UnmarshalJSON decodes NeoFS API protocol JSON format into the Version -// (Protocol Buffers JSON). Returns an error describing a format violation. -// -// See also MarshalJSON. 
-func (v *Version) UnmarshalJSON(data []byte) error { - var m refs.Version - - err := m.UnmarshalJSON(data) - if err != nil { - return err - } - - return v.ReadFromV2(m) -} diff --git a/version/version_test.go b/version/version_test.go index 7dda8ce46..093c778f5 100644 --- a/version/version_test.go +++ b/version/version_test.go @@ -1,65 +1,75 @@ -package version +package version_test import ( "math/rand" "testing" - "github.com/nspcc-dev/neofs-api-go/v2/refs" + "github.com/nspcc-dev/neofs-sdk-go/api/refs" + "github.com/nspcc-dev/neofs-sdk-go/version" + versiontest "github.com/nspcc-dev/neofs-sdk-go/version/test" "github.com/stretchr/testify/require" ) -func TestNewVersion(t *testing.T) { - t.Run("default values", func(t *testing.T) { - var v Version +func TestVersionComparable(t *testing.T) { + v1 := versiontest.Version() + require.True(t, v1 == v1) + v2 := versiontest.Version() + require.NotEqual(t, v1, v2) + require.False(t, v1 == v2) +} + +func TestCurrent(t *testing.T) { + require.EqualValues(t, 2, version.Current.Major()) + require.EqualValues(t, 16, version.Current.Minor()) +} - // check initial values - require.Zero(t, v.Major()) - require.Zero(t, v.Minor()) +func testVersionField(t *testing.T, get func(version.Version) uint32, set func(*version.Version, uint32), getAPI func(*refs.Version) uint32) { + var v version.Version - // convert to v2 message - var vV2 refs.Version - v.WriteToV2(&vV2) + require.Zero(t, get(v)) - require.Zero(t, vV2.GetMajor()) - require.Zero(t, vV2.GetMinor()) - }) + val := rand.Uint32() + set(&v, val) + require.EqualValues(t, val, get(v)) - t.Run("setting values", func(t *testing.T) { - var v Version + otherVal := val + 1 + set(&v, otherVal) + require.EqualValues(t, otherVal, get(v)) - var mjr, mnr uint32 = 1, 2 + t.Run("encoding", func(t *testing.T) { + t.Run("api", func(t *testing.T) { + var src, dst version.Version + var msg refs.Version - v.SetMajor(mjr) - v.SetMinor(mnr) - require.Equal(t, mjr, v.Major()) - require.Equal(t, mnr, 
v.Minor()) + set(&dst, val) - // convert to v2 message - var ver refs.Version - v.WriteToV2(&ver) + src.WriteToV2(&msg) + require.Zero(t, getAPI(&msg)) + require.NoError(t, dst.ReadFromV2(&msg)) + require.Zero(t, get(dst)) - require.Equal(t, mjr, ver.GetMajor()) - require.Equal(t, mnr, ver.GetMinor()) + set(&src, val) + src.WriteToV2(&msg) + require.EqualValues(t, val, getAPI(&msg)) + err := dst.ReadFromV2(&msg) + require.NoError(t, err) + require.EqualValues(t, val, get(dst)) + }) }) } -func TestSDKVersion(t *testing.T) { - v := Current() - - require.Equal(t, uint32(sdkMjr), v.Major()) - require.Equal(t, uint32(sdkMnr), v.Minor()) +func TestVersion_SetMajor(t *testing.T) { + testVersionField(t, version.Version.Major, (*version.Version).SetMajor, (*refs.Version).GetMajor) } -func TestVersion_MarshalJSON(t *testing.T) { - var v Version - v.SetMajor(rand.Uint32()) - v.SetMinor(rand.Uint32()) - - data, err := v.MarshalJSON() - require.NoError(t, err) - - var v2 Version - require.NoError(t, v2.UnmarshalJSON(data)) +func TestVersion_SetMinor(t *testing.T) { + testVersionField(t, version.Version.Minor, (*version.Version).SetMinor, (*refs.Version).GetMinor) +} - require.Equal(t, v, v2) +func TestEncodeToString(t *testing.T) { + require.Equal(t, "v2.16", version.EncodeToString(version.Current)) + var v version.Version + v.SetMajor(578393) + v.SetMinor(405609340) + require.Equal(t, "v578393.405609340", version.EncodeToString(v)) } diff --git a/waiter/container_eacl.go b/waiter/container_eacl.go index 8aaada4ac..085218955 100644 --- a/waiter/container_eacl.go +++ b/waiter/container_eacl.go @@ -45,15 +45,12 @@ func (w ContainerSetEACLWaiter) ContainerSetEACL(ctx context.Context, table eacl return fmt.Errorf("container setEacl: %w", err) } - contID, ok := table.CID() - if !ok { + contID := table.LimitedContainer() + if contID.IsZero() { return client.ErrMissingEACLContainer } - newBinary, err := table.Marshal() - if err != nil { - return fmt.Errorf("newTable.Marshal: %w", err) 
- } + newBinary := table.Marshal() var prmEacl client.PrmContainerEACL @@ -67,12 +64,7 @@ func (w ContainerSetEACLWaiter) ContainerSetEACL(ctx context.Context, table eacl return fmt.Errorf("ContainerEACL: %w", err) } - actualBinary, err := actualTable.Marshal() - if err != nil { - return fmt.Errorf("table.Marshal: %w", err) - } - - if bytes.Equal(newBinary, actualBinary) { + if bytes.Equal(newBinary, actualTable.Marshal()) { return nil } diff --git a/waiter/example_waiter_test.go b/waiter/example_waiter_test.go index 65157fb2c..7c68e9a8e 100644 --- a/waiter/example_waiter_test.go +++ b/waiter/example_waiter_test.go @@ -28,22 +28,18 @@ func ExampleNewWaiter() { account := signer.UserID() - var cont container.Container var pp netmap.PlacementPolicy var rd netmap.ReplicaDescriptor - // prepare container. - cont.Init() - cont.SetBasicACL(acl.PublicRW) - cont.SetOwner(account) - cont.SetName(strconv.FormatInt(time.Now().UnixNano(), 16)) - cont.SetCreationTime(time.Now()) - // prepare placement policy. pp.SetContainerBackupFactor(1) rd.SetNumberOfObjects(1) pp.SetReplicas([]netmap.ReplicaDescriptor{rd}) - cont.SetPlacementPolicy(pp) + + // prepare container. + cont := container.New(account, acl.PublicRW, pp) + cont.SetName(strconv.FormatInt(time.Now().UnixNano(), 16)) + cont.SetCreationTime(time.Now()) // prepare pool. opts := pool.InitParameters{}