diff --git a/internal/core/crdt/base.go b/internal/core/crdt/base.go index 7d158af6b8..31a8b6fc63 100644 --- a/internal/core/crdt/base.go +++ b/internal/core/crdt/base.go @@ -18,13 +18,13 @@ import ( "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/keys" ) func setPriority( ctx context.Context, store datastore.DSReaderWriter, - key core.DataStoreKey, + key keys.DataStoreKey, priority uint64, ) error { prioK := key.WithPriorityFlag() @@ -41,7 +41,7 @@ func setPriority( func getPriority( ctx context.Context, store datastore.DSReaderWriter, - key core.DataStoreKey, + key keys.DataStoreKey, ) (uint64, error) { pKey := key.WithPriorityFlag() pbuf, err := store.Get(ctx, pKey.ToDS()) diff --git a/internal/core/crdt/base_test.go b/internal/core/crdt/base_test.go index 3943d375ce..29e2ac9283 100644 --- a/internal/core/crdt/base_test.go +++ b/internal/core/crdt/base_test.go @@ -17,7 +17,7 @@ import ( ds "github.com/ipfs/go-datastore" "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/keys" ) func newDS() datastore.DSReaderWriter { @@ -25,14 +25,14 @@ func newDS() datastore.DSReaderWriter { } func TestBaseCRDTvalueKey(t *testing.T) { - vk := core.DataStoreKey{}.WithDocID("mykey").WithValueFlag() + vk := keys.DataStoreKey{}.WithDocID("mykey").WithValueFlag() if vk.ToString() != "/v/mykey" { t.Errorf("Incorrect valueKey. Have %v, want %v", vk.ToString(), "/v/mykey") } } func TestBaseCRDTprioryKey(t *testing.T) { - pk := core.DataStoreKey{}.WithDocID("mykey").WithPriorityFlag() + pk := keys.DataStoreKey{}.WithDocID("mykey").WithPriorityFlag() if pk.ToString() != "/p/mykey" { t.Errorf("Incorrect priorityKey. 
Have %v, want %v", pk.ToString(), "/p/mykey") } @@ -42,13 +42,13 @@ func TestBaseCRDTSetGetPriority(t *testing.T) { store := newDS() ctx := context.Background() - err := setPriority(ctx, store, core.DataStoreKey{}.WithDocID("mykey"), 10) + err := setPriority(ctx, store, keys.DataStoreKey{}.WithDocID("mykey"), 10) if err != nil { t.Errorf("baseCRDT failed to set Priority. err: %v", err) return } - priority, err := getPriority(ctx, store, core.DataStoreKey{}.WithDocID("mykey")) + priority, err := getPriority(ctx, store, keys.DataStoreKey{}.WithDocID("mykey")) if err != nil { t.Errorf("baseCRDT failed to get priority. err: %v", err) return diff --git a/internal/core/crdt/composite.go b/internal/core/crdt/composite.go index 1886b4574d..510d47d7e4 100644 --- a/internal/core/crdt/composite.go +++ b/internal/core/crdt/composite.go @@ -22,6 +22,7 @@ import ( "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/internal/core" "github.com/sourcenetwork/defradb/internal/db/base" + "github.com/sourcenetwork/defradb/internal/keys" ) // CompositeDAGDelta represents a delta-state update made of sub-MerkleCRDTs. @@ -77,20 +78,20 @@ func (delta *CompositeDAGDelta) SetPriority(prio uint64) { // CompositeDAG is a CRDT structure that is used to track a collection of sub MerkleCRDTs. type CompositeDAG struct { store datastore.DSReaderWriter - key core.DataStoreKey + key keys.DataStoreKey // schemaVersionKey is the schema version datastore key at the time of commit. // // It can be used to identify the collection datastructure state at the time of commit. 
- schemaVersionKey core.CollectionSchemaVersionKey + schemaVersionKey keys.CollectionSchemaVersionKey } var _ core.ReplicatedData = (*CompositeDAG)(nil) func NewCompositeDAG( store datastore.DSReaderWriter, - schemaVersionKey core.CollectionSchemaVersionKey, - key core.DataStoreKey, + schemaVersionKey keys.CollectionSchemaVersionKey, + key keys.DataStoreKey, ) CompositeDAG { return CompositeDAG{ store: store, @@ -125,7 +126,7 @@ func (c CompositeDAG) Merge(ctx context.Context, delta core.Delta) error { // We cannot rely on the dagDelta.Status here as it may have been deleted locally, this is not // reflected in `dagDelta.Status` if sourced via P2P. Updates synced via P2P should not undelete // the local representation of the document. - versionKey := c.key.WithValueFlag().WithFieldID(core.DATASTORE_DOC_VERSION_FIELD_ID) + versionKey := c.key.WithValueFlag().WithFieldID(keys.DATASTORE_DOC_VERSION_FIELD_ID) objectMarker, err := c.store.Get(ctx, c.key.ToPrimaryDataStoreKey().ToDS()) hasObjectMarker := !errors.Is(err, ds.ErrNotFound) if err != nil && hasObjectMarker { @@ -159,7 +160,7 @@ func (c CompositeDAG) Merge(ctx context.Context, delta core.Delta) error { return nil } -func (c CompositeDAG) deleteWithPrefix(ctx context.Context, key core.DataStoreKey) error { +func (c CompositeDAG) deleteWithPrefix(ctx context.Context, key keys.DataStoreKey) error { q := query.Query{ Prefix: key.ToString(), } @@ -168,12 +169,12 @@ func (c CompositeDAG) deleteWithPrefix(ctx context.Context, key core.DataStoreKe if e.Error != nil { return err } - dsKey, err := core.NewDataStoreKey(e.Key) + dsKey, err := keys.NewDataStoreKey(e.Key) if err != nil { return err } - if dsKey.InstanceType == core.ValueKey { + if dsKey.InstanceType == keys.ValueKey { err = c.store.Put(ctx, dsKey.WithDeletedFlag().ToDS(), e.Value) if err != nil { return err diff --git a/internal/core/crdt/counter.go b/internal/core/crdt/counter.go index 966052b5f0..1f287eb700 100644 --- a/internal/core/crdt/counter.go +++ 
b/internal/core/crdt/counter.go @@ -26,6 +26,7 @@ import ( "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/internal/core" "github.com/sourcenetwork/defradb/internal/db/base" + "github.com/sourcenetwork/defradb/internal/keys" ) type Incrementable interface { @@ -78,12 +79,12 @@ func (delta *CounterDelta) SetPriority(prio uint64) { // of an Int and Float data types that ensures convergence. type Counter struct { store datastore.DSReaderWriter - key core.DataStoreKey + key keys.DataStoreKey // schemaVersionKey is the schema version datastore key at the time of commit. // // It can be used to identify the collection datastructure state at the time of commit. - schemaVersionKey core.CollectionSchemaVersionKey + schemaVersionKey keys.CollectionSchemaVersionKey // fieldName holds the name of the field hosting this CRDT, if this is a field level // commit. @@ -98,8 +99,8 @@ var _ core.ReplicatedData = (*Counter)(nil) // NewCounter returns a new instance of the Counter with the given ID. 
func NewCounter( store datastore.DSReaderWriter, - schemaVersionKey core.CollectionSchemaVersionKey, - key core.DataStoreKey, + schemaVersionKey keys.CollectionSchemaVersionKey, + key keys.DataStoreKey, fieldName string, allowDecrement bool, kind client.ScalarKind, @@ -205,7 +206,7 @@ func (c Counter) CType() client.CType { func validateAndIncrement[T Incrementable]( ctx context.Context, store datastore.DSReaderWriter, - key core.DataStoreKey, + key keys.DataStoreKey, valueAsBytes []byte, allowDecrement bool, ) ([]byte, error) { @@ -230,7 +231,7 @@ func validateAndIncrement[T Incrementable]( func getCurrentValue[T Incrementable]( ctx context.Context, store datastore.DSReaderWriter, - key core.DataStoreKey, + key keys.DataStoreKey, ) (T, error) { curValue, err := store.Get(ctx, key.ToDS()) if err != nil { diff --git a/internal/core/crdt/lwwreg.go b/internal/core/crdt/lwwreg.go index 75e1244374..4fdf58ab47 100644 --- a/internal/core/crdt/lwwreg.go +++ b/internal/core/crdt/lwwreg.go @@ -21,6 +21,7 @@ import ( "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/internal/core" "github.com/sourcenetwork/defradb/internal/db/base" + "github.com/sourcenetwork/defradb/internal/keys" ) // LWWRegDelta is a single delta operation for an LWWRegister @@ -66,12 +67,12 @@ func (delta *LWWRegDelta) SetPriority(prio uint64) { // of an arbitrary data type that ensures convergence. type LWWRegister struct { store datastore.DSReaderWriter - key core.DataStoreKey + key keys.DataStoreKey // schemaVersionKey is the schema version datastore key at the time of commit. // // It can be used to identify the collection datastructure state at the time of commit. - schemaVersionKey core.CollectionSchemaVersionKey + schemaVersionKey keys.CollectionSchemaVersionKey // fieldName holds the name of the field hosting this CRDT, if this is a field level // commit. 
@@ -83,8 +84,8 @@ var _ core.ReplicatedData = (*LWWRegister)(nil) // NewLWWRegister returns a new instance of the LWWReg with the given ID. func NewLWWRegister( store datastore.DSReaderWriter, - schemaVersionKey core.CollectionSchemaVersionKey, - key core.DataStoreKey, + schemaVersionKey keys.CollectionSchemaVersionKey, + key keys.DataStoreKey, fieldName string, ) LWWRegister { return LWWRegister{ diff --git a/internal/core/crdt/lwwreg_test.go b/internal/core/crdt/lwwreg_test.go index 087adecb70..c3ce9992b5 100644 --- a/internal/core/crdt/lwwreg_test.go +++ b/internal/core/crdt/lwwreg_test.go @@ -18,6 +18,7 @@ import ( "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/keys" ) func newMockStore() datastore.DSReaderWriter { @@ -26,8 +27,8 @@ func newMockStore() datastore.DSReaderWriter { func setupLWWRegister() LWWRegister { store := newMockStore() - key := core.DataStoreKey{DocID: "AAAA-BBBB"} - return NewLWWRegister(store, core.CollectionSchemaVersionKey{}, key, "") + key := keys.DataStoreKey{DocID: "AAAA-BBBB"} + return NewLWWRegister(store, keys.CollectionSchemaVersionKey{}, key, "") } func TestLWWRegisterAddDelta(t *testing.T) { diff --git a/internal/core/data.go b/internal/core/data.go index 11a627d18b..122d255c5f 100644 --- a/internal/core/data.go +++ b/internal/core/data.go @@ -10,27 +10,31 @@ package core -import "strings" +import ( + "strings" + + "github.com/sourcenetwork/defradb/internal/keys" +) // Span is a range of keys from [Start, End). type Span interface { // Start returns the starting key of the Span. - Start() DataStoreKey + Start() keys.DataStoreKey // End returns the ending key of the Span. - End() DataStoreKey + End() keys.DataStoreKey // Compare returns -1 if the provided span is less, 0 if it is equal, and 1 if its greater. 
Compare(Span) SpanComparisonResult } type span struct { - start DataStoreKey - end DataStoreKey + start keys.DataStoreKey + end keys.DataStoreKey } var _ Span = span{} // NewSpan creates a new Span from the provided start and end keys. -func NewSpan(start, end DataStoreKey) Span { +func NewSpan(start, end keys.DataStoreKey) Span { return span{ start: start, end: end, @@ -38,12 +42,12 @@ func NewSpan(start, end DataStoreKey) Span { } // Start returns the starting key of the Span. -func (s span) Start() DataStoreKey { +func (s span) Start() keys.DataStoreKey { return s.start } // End returns the ending key of the Span. -func (s span) End() DataStoreKey { +func (s span) End() keys.DataStoreKey { return s.end } @@ -136,7 +140,7 @@ func (this span) Compare(other Span) SpanComparisonResult { return After } -func isAdjacent(this DataStoreKey, other DataStoreKey) bool { +func isAdjacent(this keys.DataStoreKey, other keys.DataStoreKey) bool { return len(this.ToString()) == len(other.ToString()) && (this.PrefixEnd().ToString() == other.ToString() || this.ToString() == other.PrefixEnd().ToString()) diff --git a/internal/core/data_test.go b/internal/core/data_test.go index ae3580528f..d55851b795 100644 --- a/internal/core/data_test.go +++ b/internal/core/data_test.go @@ -14,6 +14,8 @@ import ( "testing" "github.com/stretchr/testify/assert" + + "github.com/sourcenetwork/defradb/internal/keys" ) func TestMergeAscending_ReturnsEmpty_GivenEmpty(t *testing.T) { @@ -25,8 +27,8 @@ func TestMergeAscending_ReturnsEmpty_GivenEmpty(t *testing.T) { } func TestMergeAscending_ReturnsSingle_GivenSingle(t *testing.T) { - start1 := MustNewDataStoreKey("/1/p/0/k1") - end1 := MustNewDataStoreKey("/1/p/0/k2") + start1 := keys.MustNewDataStoreKey("/1/p/0/k1") + end1 := keys.MustNewDataStoreKey("/1/p/0/k2") input := []Span{NewSpan(start1, end1)} result := MergeAscending(input) @@ -37,10 +39,10 @@ func TestMergeAscending_ReturnsSingle_GivenSingle(t *testing.T) { } func 
TestMergeAscending_ReturnsSecondBeforeFirst_GivenKeysInReverseOrder(t *testing.T) { - start1 := MustNewDataStoreKey("/1/p/0/k4") - end1 := MustNewDataStoreKey("/1/p/0/k5") - start2 := MustNewDataStoreKey("/1/p/0/k1") - end2 := MustNewDataStoreKey("/1/p/0/k2") + start1 := keys.MustNewDataStoreKey("/1/p/0/k4") + end1 := keys.MustNewDataStoreKey("/1/p/0/k5") + start2 := keys.MustNewDataStoreKey("/1/p/0/k1") + end2 := keys.MustNewDataStoreKey("/1/p/0/k2") input := []Span{ NewSpan(start1, end1), @@ -57,12 +59,12 @@ func TestMergeAscending_ReturnsSecondBeforeFirst_GivenKeysInReverseOrder(t *test } func TestMergeAscending_ReturnsItemsInOrder_GivenKeysInMixedOrder(t *testing.T) { - start1 := MustNewDataStoreKey("/1/p/0/k1") - end1 := MustNewDataStoreKey("/1/p/0/k2") - start2 := MustNewDataStoreKey("/1/p/0/k7") - end2 := MustNewDataStoreKey("/1/p/0/k8") - start3 := MustNewDataStoreKey("/1/p/0/k4") - end3 := MustNewDataStoreKey("/1/p/0/k5") + start1 := keys.MustNewDataStoreKey("/1/p/0/k1") + end1 := keys.MustNewDataStoreKey("/1/p/0/k2") + start2 := keys.MustNewDataStoreKey("/1/p/0/k7") + end2 := keys.MustNewDataStoreKey("/1/p/0/k8") + start3 := keys.MustNewDataStoreKey("/1/p/0/k4") + end3 := keys.MustNewDataStoreKey("/1/p/0/k5") input := []Span{ NewSpan(start1, end1), @@ -83,10 +85,10 @@ func TestMergeAscending_ReturnsItemsInOrder_GivenKeysInMixedOrder(t *testing.T) } func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndEqualToStart(t *testing.T) { - start1 := MustNewDataStoreKey("/1/p/0/k3") - end1 := MustNewDataStoreKey("/1/p/0/k4") - start2 := MustNewDataStoreKey("/1/p/0/k1") - end2 := MustNewDataStoreKey("/1/p/0/k3") + start1 := keys.MustNewDataStoreKey("/1/p/0/k3") + end1 := keys.MustNewDataStoreKey("/1/p/0/k4") + start2 := keys.MustNewDataStoreKey("/1/p/0/k1") + end2 := keys.MustNewDataStoreKey("/1/p/0/k3") input := []Span{ NewSpan(start1, end1), NewSpan(start2, end2), @@ -100,10 +102,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndEqualToStart(t 
*testing } func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndAdjacentToStart(t *testing.T) { - start1 := MustNewDataStoreKey("/1/p/0/k3") - end1 := MustNewDataStoreKey("/1/p/0/k4") - start2 := MustNewDataStoreKey("/1/p/0/k1") - end2 := MustNewDataStoreKey("/1/p/0/k2") + start1 := keys.MustNewDataStoreKey("/1/p/0/k3") + end1 := keys.MustNewDataStoreKey("/1/p/0/k4") + start2 := keys.MustNewDataStoreKey("/1/p/0/k1") + end2 := keys.MustNewDataStoreKey("/1/p/0/k2") input := []Span{ NewSpan(start1, end1), NewSpan(start2, end2), @@ -117,10 +119,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndAdjacentToStart(t *test } func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndWithin(t *testing.T) { - start1 := MustNewDataStoreKey("/1/p/0/k3") - end1 := MustNewDataStoreKey("/1/p/0/k4") - start2 := MustNewDataStoreKey("/1/p/0/k1") - end2 := MustNewDataStoreKey("/1/p/0/k3.5") + start1 := keys.MustNewDataStoreKey("/1/p/0/k3") + end1 := keys.MustNewDataStoreKey("/1/p/0/k4") + start2 := keys.MustNewDataStoreKey("/1/p/0/k1") + end2 := keys.MustNewDataStoreKey("/1/p/0/k3.5") input := []Span{ NewSpan(start1, end1), NewSpan(start2, end2), @@ -134,10 +136,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndWithin(t *testing.T) { } func TestMergeAscending_ReturnsSingle_GivenStartPrefixesEndWithin(t *testing.T) { - start1 := MustNewDataStoreKey("/1/p/0/k1.1") - end1 := MustNewDataStoreKey("/1/p/0/k3") - start2 := MustNewDataStoreKey("/1/p/0/k1") - end2 := MustNewDataStoreKey("/1/p/0/k2.5") + start1 := keys.MustNewDataStoreKey("/1/p/0/k1.1") + end1 := keys.MustNewDataStoreKey("/1/p/0/k3") + start2 := keys.MustNewDataStoreKey("/1/p/0/k1") + end2 := keys.MustNewDataStoreKey("/1/p/0/k2.5") input := []Span{ NewSpan(start1, end1), NewSpan(start2, end2), @@ -151,10 +153,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartPrefixesEndWithin(t *testing.T) } func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndWithinEndPrefix(t *testing.T) { - start1 := 
MustNewDataStoreKey("/1/p/0/k3") - end1 := MustNewDataStoreKey("/1/p/0/k4") - start2 := MustNewDataStoreKey("/1/p/0/k1") - end2 := MustNewDataStoreKey("/1/p/0/k4.5") + start1 := keys.MustNewDataStoreKey("/1/p/0/k3") + end1 := keys.MustNewDataStoreKey("/1/p/0/k4") + start2 := keys.MustNewDataStoreKey("/1/p/0/k1") + end2 := keys.MustNewDataStoreKey("/1/p/0/k4.5") input := []Span{ NewSpan(start1, end1), NewSpan(start2, end2), @@ -168,10 +170,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndWithinEndPrefix(t *test } func TestMergeAscending_ReturnsSingle_GivenStartPrefixesEndWithinEndPrefix(t *testing.T) { - start1 := MustNewDataStoreKey("/1/p/0/k1.1") - end1 := MustNewDataStoreKey("/1/p/0/k3") - start2 := MustNewDataStoreKey("/1/p/0/k1") - end2 := MustNewDataStoreKey("/1/p/0/k3.5") + start1 := keys.MustNewDataStoreKey("/1/p/0/k1.1") + end1 := keys.MustNewDataStoreKey("/1/p/0/k3") + start2 := keys.MustNewDataStoreKey("/1/p/0/k1") + end2 := keys.MustNewDataStoreKey("/1/p/0/k3.5") input := []Span{ NewSpan(start1, end1), NewSpan(start2, end2), @@ -185,10 +187,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartPrefixesEndWithinEndPrefix(t *te } func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndEqual(t *testing.T) { - start1 := MustNewDataStoreKey("/1/p/0/k3") - end1 := MustNewDataStoreKey("/1/p/0/k4") - start2 := MustNewDataStoreKey("/1/p/0/k1") - end2 := MustNewDataStoreKey("/1/p/0/k4") + start1 := keys.MustNewDataStoreKey("/1/p/0/k3") + end1 := keys.MustNewDataStoreKey("/1/p/0/k4") + start2 := keys.MustNewDataStoreKey("/1/p/0/k1") + end2 := keys.MustNewDataStoreKey("/1/p/0/k4") input := []Span{ NewSpan(start1, end1), NewSpan(start2, end2), @@ -202,10 +204,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndEqual(t *testing.T) { } func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndAdjacentAndBefore(t *testing.T) { - start1 := MustNewDataStoreKey("/1/p/0/k3") - end1 := MustNewDataStoreKey("/1/p/0/k5") - start2 := 
MustNewDataStoreKey("/1/p/0/k1") - end2 := MustNewDataStoreKey("/1/p/0/k4") + start1 := keys.MustNewDataStoreKey("/1/p/0/k3") + end1 := keys.MustNewDataStoreKey("/1/p/0/k5") + start2 := keys.MustNewDataStoreKey("/1/p/0/k1") + end2 := keys.MustNewDataStoreKey("/1/p/0/k4") input := []Span{ NewSpan(start1, end1), NewSpan(start2, end2), @@ -219,10 +221,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndAdjacentAndBefore(t *te } func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndAdjacentAndGreater(t *testing.T) { - start1 := MustNewDataStoreKey("/1/p/0/k3") - end1 := MustNewDataStoreKey("/1/p/0/k4") - start2 := MustNewDataStoreKey("/1/p/0/k1") - end2 := MustNewDataStoreKey("/1/p/0/k5") + start1 := keys.MustNewDataStoreKey("/1/p/0/k3") + end1 := keys.MustNewDataStoreKey("/1/p/0/k4") + start2 := keys.MustNewDataStoreKey("/1/p/0/k1") + end2 := keys.MustNewDataStoreKey("/1/p/0/k5") input := []Span{ NewSpan(start1, end1), NewSpan(start2, end2), @@ -236,10 +238,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndAdjacentAndGreater(t *t } func TestMergeAscending_ReturnsSingle_GivenStartPrefixesEndEqual(t *testing.T) { - start1 := MustNewDataStoreKey("/1/p/0/k1.1") - end1 := MustNewDataStoreKey("/1/p/0/k3") - start2 := MustNewDataStoreKey("/1/p/0/k1") - end2 := MustNewDataStoreKey("/1/p/0/k3") + start1 := keys.MustNewDataStoreKey("/1/p/0/k1.1") + end1 := keys.MustNewDataStoreKey("/1/p/0/k3") + start2 := keys.MustNewDataStoreKey("/1/p/0/k1") + end2 := keys.MustNewDataStoreKey("/1/p/0/k3") input := []Span{ NewSpan(start1, end1), NewSpan(start2, end2), @@ -253,10 +255,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartPrefixesEndEqual(t *testing.T) { } func TestMergeAscending_ReturnsSingle_GivenStartPrefixesEndAdjacentAndBefore(t *testing.T) { - start1 := MustNewDataStoreKey("/1/p/0/k1.1") - end1 := MustNewDataStoreKey("/1/p/0/k3") - start2 := MustNewDataStoreKey("/1/p/0/k1") - end2 := MustNewDataStoreKey("/1/p/0/k2") + start1 := 
keys.MustNewDataStoreKey("/1/p/0/k1.1") + end1 := keys.MustNewDataStoreKey("/1/p/0/k3") + start2 := keys.MustNewDataStoreKey("/1/p/0/k1") + end2 := keys.MustNewDataStoreKey("/1/p/0/k2") input := []Span{ NewSpan(start1, end1), NewSpan(start2, end2), @@ -270,10 +272,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartPrefixesEndAdjacentAndBefore(t * } func TestMergeAscending_ReturnsSingle_GivenStartPrefixesEndAdjacentAndAfter(t *testing.T) { - start1 := MustNewDataStoreKey("/1/p/0/k1.1") - end1 := MustNewDataStoreKey("/1/p/0/k3") - start2 := MustNewDataStoreKey("/1/p/0/k1") - end2 := MustNewDataStoreKey("/1/p/0/k4") + start1 := keys.MustNewDataStoreKey("/1/p/0/k1.1") + end1 := keys.MustNewDataStoreKey("/1/p/0/k3") + start2 := keys.MustNewDataStoreKey("/1/p/0/k1") + end2 := keys.MustNewDataStoreKey("/1/p/0/k4") input := []Span{ NewSpan(start1, end1), NewSpan(start2, end2), @@ -287,16 +289,16 @@ func TestMergeAscending_ReturnsSingle_GivenStartPrefixesEndAdjacentAndAfter(t *t } func TestMergeAscending_ReturnsMiddleSpansMerged_GivenSpanCoveringMiddleSpans(t *testing.T) { - start1 := MustNewDataStoreKey("/1/p/0/k1") - end1 := MustNewDataStoreKey("/1/p/0/k2") - start2 := MustNewDataStoreKey("/1/p/0/k6") - end2 := MustNewDataStoreKey("/1/p/0/k7") - start3 := MustNewDataStoreKey("/1/p/0/k9") - end3 := MustNewDataStoreKey("/1/p/0/ka") - start4 := MustNewDataStoreKey("/1/p/0/kc") - end4 := MustNewDataStoreKey("/1/p/0/kd") - start5 := MustNewDataStoreKey("/1/p/0/k4") - end5 := MustNewDataStoreKey("/1/p/0/ka") + start1 := keys.MustNewDataStoreKey("/1/p/0/k1") + end1 := keys.MustNewDataStoreKey("/1/p/0/k2") + start2 := keys.MustNewDataStoreKey("/1/p/0/k6") + end2 := keys.MustNewDataStoreKey("/1/p/0/k7") + start3 := keys.MustNewDataStoreKey("/1/p/0/k9") + end3 := keys.MustNewDataStoreKey("/1/p/0/ka") + start4 := keys.MustNewDataStoreKey("/1/p/0/kc") + end4 := keys.MustNewDataStoreKey("/1/p/0/kd") + start5 := keys.MustNewDataStoreKey("/1/p/0/k4") + end5 := 
keys.MustNewDataStoreKey("/1/p/0/ka") input := []Span{ NewSpan(start1, end1), NewSpan(start2, end2), @@ -318,10 +320,10 @@ func TestMergeAscending_ReturnsMiddleSpansMerged_GivenSpanCoveringMiddleSpans(t } func TestMergeAscending_ReturnsSingle_GivenStartEqualEndWithin(t *testing.T) { - start1 := MustNewDataStoreKey("/1/p/0/k1") - end1 := MustNewDataStoreKey("/1/p/0/k2") - start2 := MustNewDataStoreKey("/1/p/0/k1") - end2 := MustNewDataStoreKey("/1/p/0/k1.5") + start1 := keys.MustNewDataStoreKey("/1/p/0/k1") + end1 := keys.MustNewDataStoreKey("/1/p/0/k2") + start2 := keys.MustNewDataStoreKey("/1/p/0/k1") + end2 := keys.MustNewDataStoreKey("/1/p/0/k1.5") input := []Span{ NewSpan(start1, end1), NewSpan(start2, end2), @@ -335,10 +337,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartEqualEndWithin(t *testing.T) { } func TestMergeAscending_ReturnsSingle_GivenStartEqualEndWithinEndPrefix(t *testing.T) { - start1 := MustNewDataStoreKey("/1/p/0/k1") - end1 := MustNewDataStoreKey("/1/p/0/k2") - start2 := MustNewDataStoreKey("/1/p/0/k1") - end2 := MustNewDataStoreKey("/1/p/0/k2.5") + start1 := keys.MustNewDataStoreKey("/1/p/0/k1") + end1 := keys.MustNewDataStoreKey("/1/p/0/k2") + start2 := keys.MustNewDataStoreKey("/1/p/0/k1") + end2 := keys.MustNewDataStoreKey("/1/p/0/k2.5") input := []Span{ NewSpan(start1, end1), NewSpan(start2, end2), @@ -352,8 +354,8 @@ func TestMergeAscending_ReturnsSingle_GivenStartEqualEndWithinEndPrefix(t *testi } func TestMergeAscending_ReturnsSingle_GivenDuplicates(t *testing.T) { - start1 := MustNewDataStoreKey("/1/p/0/k1") - end1 := MustNewDataStoreKey("/1/p/0/k2") + start1 := keys.MustNewDataStoreKey("/1/p/0/k1") + end1 := keys.MustNewDataStoreKey("/1/p/0/k2") input := []Span{ NewSpan(start1, end1), NewSpan(start1, end1), @@ -367,10 +369,10 @@ func TestMergeAscending_ReturnsSingle_GivenDuplicates(t *testing.T) { } func TestMergeAscending_ReturnsSingle_GivenStartWithinEndWithin(t *testing.T) { - start1 := MustNewDataStoreKey("/1/p/0/k1") - 
end1 := MustNewDataStoreKey("/1/p/0/k2") - start2 := MustNewDataStoreKey("/1/p/0/k1.2") - end2 := MustNewDataStoreKey("/1/p/0/k1.5") + start1 := keys.MustNewDataStoreKey("/1/p/0/k1") + end1 := keys.MustNewDataStoreKey("/1/p/0/k2") + start2 := keys.MustNewDataStoreKey("/1/p/0/k1.2") + end2 := keys.MustNewDataStoreKey("/1/p/0/k1.5") input := []Span{ NewSpan(start1, end1), NewSpan(start2, end2), @@ -384,10 +386,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartWithinEndWithin(t *testing.T) { } func TestMergeAscending_ReturnsSingle_GivenStartWithinEndWithinEndPrefix(t *testing.T) { - start1 := MustNewDataStoreKey("/1/p/0/k1") - end1 := MustNewDataStoreKey("/1/p/0/k2") - start2 := MustNewDataStoreKey("/1/p/0/k1.2") - end2 := MustNewDataStoreKey("/1/p/0/k2.5") + start1 := keys.MustNewDataStoreKey("/1/p/0/k1") + end1 := keys.MustNewDataStoreKey("/1/p/0/k2") + start2 := keys.MustNewDataStoreKey("/1/p/0/k1.2") + end2 := keys.MustNewDataStoreKey("/1/p/0/k2.5") input := []Span{ NewSpan(start1, end1), NewSpan(start2, end2), @@ -401,10 +403,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartWithinEndWithinEndPrefix(t *test } func TestMergeAscending_ReturnsSingle_GivenStartWithinEndEqual(t *testing.T) { - start1 := MustNewDataStoreKey("/1/p/0/k1") - end1 := MustNewDataStoreKey("/1/p/0/k2") - start2 := MustNewDataStoreKey("/1/p/0/k1.2") - end2 := MustNewDataStoreKey("/1/p/0/k2") + start1 := keys.MustNewDataStoreKey("/1/p/0/k1") + end1 := keys.MustNewDataStoreKey("/1/p/0/k2") + start2 := keys.MustNewDataStoreKey("/1/p/0/k1.2") + end2 := keys.MustNewDataStoreKey("/1/p/0/k2") input := []Span{ NewSpan(start1, end1), NewSpan(start2, end2), @@ -418,10 +420,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartWithinEndEqual(t *testing.T) { } func TestMergeAscending_ReturnsSingle_GivenStartWithinEndAdjacentAndBefore(t *testing.T) { - start1 := MustNewDataStoreKey("/1/p/0/k1") - end1 := MustNewDataStoreKey("/1/p/0/k3") - start2 := MustNewDataStoreKey("/1/p/0/k1.2") - end2 := 
MustNewDataStoreKey("/1/p/0/k2") + start1 := keys.MustNewDataStoreKey("/1/p/0/k1") + end1 := keys.MustNewDataStoreKey("/1/p/0/k3") + start2 := keys.MustNewDataStoreKey("/1/p/0/k1.2") + end2 := keys.MustNewDataStoreKey("/1/p/0/k2") input := []Span{ NewSpan(start1, end1), NewSpan(start2, end2), @@ -435,10 +437,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartWithinEndAdjacentAndBefore(t *te } func TestMergeAscending_ReturnsSingle_GivenStartWithinEndAdjacentAndAfter(t *testing.T) { - start1 := MustNewDataStoreKey("/1/p/0/k1") - end1 := MustNewDataStoreKey("/1/p/0/k3") - start2 := MustNewDataStoreKey("/1/p/0/k1.2") - end2 := MustNewDataStoreKey("/1/p/0/k4") + start1 := keys.MustNewDataStoreKey("/1/p/0/k1") + end1 := keys.MustNewDataStoreKey("/1/p/0/k3") + start2 := keys.MustNewDataStoreKey("/1/p/0/k1.2") + end2 := keys.MustNewDataStoreKey("/1/p/0/k4") input := []Span{ NewSpan(start1, end1), NewSpan(start2, end2), @@ -454,16 +456,16 @@ func TestMergeAscending_ReturnsSingle_GivenStartWithinEndAdjacentAndAfter(t *tes func TestMergeAscending_ReturnsMiddleSpansMerged_GivenStartEqualEndAfterSpanCoveringMiddleSpans( t *testing.T, ) { - start1 := MustNewDataStoreKey("/1/p/0/k1") - end1 := MustNewDataStoreKey("/1/p/0/k2") - start2 := MustNewDataStoreKey("/1/p/0/k4") - end2 := MustNewDataStoreKey("/1/p/0/k5") - start3 := MustNewDataStoreKey("/1/p/0/k7") - end3 := MustNewDataStoreKey("/1/p/0/k8") - start4 := MustNewDataStoreKey("/1/p/0/kc") - end4 := MustNewDataStoreKey("/1/p/0/kd") - start5 := MustNewDataStoreKey("/1/p/0/k4") // equal to start2 - end5 := MustNewDataStoreKey("/1/p/0/ka") + start1 := keys.MustNewDataStoreKey("/1/p/0/k1") + end1 := keys.MustNewDataStoreKey("/1/p/0/k2") + start2 := keys.MustNewDataStoreKey("/1/p/0/k4") + end2 := keys.MustNewDataStoreKey("/1/p/0/k5") + start3 := keys.MustNewDataStoreKey("/1/p/0/k7") + end3 := keys.MustNewDataStoreKey("/1/p/0/k8") + start4 := keys.MustNewDataStoreKey("/1/p/0/kc") + end4 := keys.MustNewDataStoreKey("/1/p/0/kd") + 
start5 := keys.MustNewDataStoreKey("/1/p/0/k4") // equal to start2 + end5 := keys.MustNewDataStoreKey("/1/p/0/ka") input := []Span{ NewSpan(start1, end1), NewSpan(start2, end2), @@ -487,16 +489,16 @@ func TestMergeAscending_ReturnsMiddleSpansMerged_GivenStartEqualEndAfterSpanCove func TestMergeAscending_ReturnsMiddleSpansMerged_GivenStartWithinEndAfterSpanCoveringMiddleSpans( t *testing.T, ) { - start1 := MustNewDataStoreKey("/1/p/0/k1") - end1 := MustNewDataStoreKey("/1/p/0/k2") - start2 := MustNewDataStoreKey("/1/p/0/k4") - end2 := MustNewDataStoreKey("/1/p/0/k5") - start3 := MustNewDataStoreKey("/1/p/0/k7") - end3 := MustNewDataStoreKey("/1/p/0/k8") - start4 := MustNewDataStoreKey("/1/p/0/kc") - end4 := MustNewDataStoreKey("/1/p/0/kd") - start5 := MustNewDataStoreKey("/1/p/0/k4.5") // within span2 - end5 := MustNewDataStoreKey("/1/p/0/ka") + start1 := keys.MustNewDataStoreKey("/1/p/0/k1") + end1 := keys.MustNewDataStoreKey("/1/p/0/k2") + start2 := keys.MustNewDataStoreKey("/1/p/0/k4") + end2 := keys.MustNewDataStoreKey("/1/p/0/k5") + start3 := keys.MustNewDataStoreKey("/1/p/0/k7") + end3 := keys.MustNewDataStoreKey("/1/p/0/k8") + start4 := keys.MustNewDataStoreKey("/1/p/0/kc") + end4 := keys.MustNewDataStoreKey("/1/p/0/kd") + start5 := keys.MustNewDataStoreKey("/1/p/0/k4.5") // within span2 + end5 := keys.MustNewDataStoreKey("/1/p/0/ka") input := []Span{ NewSpan(start1, end1), NewSpan(start2, end2), @@ -519,16 +521,16 @@ func TestMergeAscending_ReturnsMiddleSpansMerged_GivenStartWithinEndAfterSpanCov func TestMergeAscending_ReturnsMiddleSpansMerged_GivenStartEqualToEndEndAfterSpanCoveringMiddleSpans( t *testing.T, ) { - start1 := MustNewDataStoreKey("/1/p/0/k1") - end1 := MustNewDataStoreKey("/1/p/0/k2") - start2 := MustNewDataStoreKey("/1/p/0/k4") - end2 := MustNewDataStoreKey("/1/p/0/k5") - start3 := MustNewDataStoreKey("/1/p/0/k7") - end3 := MustNewDataStoreKey("/1/p/0/k8") - start4 := MustNewDataStoreKey("/1/p/0/kc") - end4 := 
MustNewDataStoreKey("/1/p/0/kd") - start5 := MustNewDataStoreKey("/1/p/0/k5") // span2's end - end5 := MustNewDataStoreKey("/1/p/0/ka") + start1 := keys.MustNewDataStoreKey("/1/p/0/k1") + end1 := keys.MustNewDataStoreKey("/1/p/0/k2") + start2 := keys.MustNewDataStoreKey("/1/p/0/k4") + end2 := keys.MustNewDataStoreKey("/1/p/0/k5") + start3 := keys.MustNewDataStoreKey("/1/p/0/k7") + end3 := keys.MustNewDataStoreKey("/1/p/0/k8") + start4 := keys.MustNewDataStoreKey("/1/p/0/kc") + end4 := keys.MustNewDataStoreKey("/1/p/0/kd") + start5 := keys.MustNewDataStoreKey("/1/p/0/k5") // span2's end + end5 := keys.MustNewDataStoreKey("/1/p/0/ka") input := []Span{ NewSpan(start1, end1), NewSpan(start2, end2), @@ -551,16 +553,16 @@ func TestMergeAscending_ReturnsMiddleSpansMerged_GivenStartEqualToEndEndAfterSpa func TestMergeAscending_ReturnsMiddleSpansMerged_GivenStartAdjacentAndBeforeEndEndAfterSpanCoveringMiddleSpans( t *testing.T, ) { - start1 := MustNewDataStoreKey("/1/p/0/k1") - end1 := MustNewDataStoreKey("/1/p/0/k2") - start2 := MustNewDataStoreKey("/1/p/0/k4") - end2 := MustNewDataStoreKey("/1/p/0/k6") - start3 := MustNewDataStoreKey("/1/p/0/k8") - end3 := MustNewDataStoreKey("/1/p/0/k9") - start4 := MustNewDataStoreKey("/1/p/0/kd") - end4 := MustNewDataStoreKey("/1/p/0/ke") - start5 := MustNewDataStoreKey("/1/p/0/k5") // adjacent but before span2's end - end5 := MustNewDataStoreKey("/1/p/0/kb") + start1 := keys.MustNewDataStoreKey("/1/p/0/k1") + end1 := keys.MustNewDataStoreKey("/1/p/0/k2") + start2 := keys.MustNewDataStoreKey("/1/p/0/k4") + end2 := keys.MustNewDataStoreKey("/1/p/0/k6") + start3 := keys.MustNewDataStoreKey("/1/p/0/k8") + end3 := keys.MustNewDataStoreKey("/1/p/0/k9") + start4 := keys.MustNewDataStoreKey("/1/p/0/kd") + end4 := keys.MustNewDataStoreKey("/1/p/0/ke") + start5 := keys.MustNewDataStoreKey("/1/p/0/k5") // adjacent but before span2's end + end5 := keys.MustNewDataStoreKey("/1/p/0/kb") input := []Span{ NewSpan(start1, end1), NewSpan(start2, end2), 
@@ -583,16 +585,16 @@ func TestMergeAscending_ReturnsMiddleSpansMerged_GivenStartAdjacentAndBeforeEndE func TestMergeAscending_ReturnsMiddleSpansMerged_GivenStartAdjacentAndAfterEndEndAfterSpanCoveringMiddleSpans( t *testing.T, ) { - start1 := MustNewDataStoreKey("/1/p/0/k1") - end1 := MustNewDataStoreKey("/1/p/0/k2") - start2 := MustNewDataStoreKey("/1/p/0/k4") - end2 := MustNewDataStoreKey("/1/p/0/k5") - start3 := MustNewDataStoreKey("/1/p/0/k8") - end3 := MustNewDataStoreKey("/1/p/0/k9") - start4 := MustNewDataStoreKey("/1/p/0/kd") - end4 := MustNewDataStoreKey("/1/p/0/ke") - start5 := MustNewDataStoreKey("/1/p/0/k6") // adjacent and after span2's end - end5 := MustNewDataStoreKey("/1/p/0/kb") + start1 := keys.MustNewDataStoreKey("/1/p/0/k1") + end1 := keys.MustNewDataStoreKey("/1/p/0/k2") + start2 := keys.MustNewDataStoreKey("/1/p/0/k4") + end2 := keys.MustNewDataStoreKey("/1/p/0/k5") + start3 := keys.MustNewDataStoreKey("/1/p/0/k8") + end3 := keys.MustNewDataStoreKey("/1/p/0/k9") + start4 := keys.MustNewDataStoreKey("/1/p/0/kd") + end4 := keys.MustNewDataStoreKey("/1/p/0/ke") + start5 := keys.MustNewDataStoreKey("/1/p/0/k6") // adjacent and after span2's end + end5 := keys.MustNewDataStoreKey("/1/p/0/kb") input := []Span{ NewSpan(start1, end1), NewSpan(start2, end2), @@ -613,10 +615,10 @@ func TestMergeAscending_ReturnsMiddleSpansMerged_GivenStartAdjacentAndAfterEndEn } func TestMergeAscending_ReturnsTwoItems_GivenSecondItemAfterFirst(t *testing.T) { - start1 := MustNewDataStoreKey("/1/p/0/k1") - end1 := MustNewDataStoreKey("/1/p/0/k2") - start2 := MustNewDataStoreKey("/1/p/0/k4") - end2 := MustNewDataStoreKey("/1/p/0/k5") + start1 := keys.MustNewDataStoreKey("/1/p/0/k1") + end1 := keys.MustNewDataStoreKey("/1/p/0/k2") + start2 := keys.MustNewDataStoreKey("/1/p/0/k4") + end2 := keys.MustNewDataStoreKey("/1/p/0/k5") input := []Span{ NewSpan(start1, end1), NewSpan(start2, end2), @@ -632,10 +634,10 @@ func 
TestMergeAscending_ReturnsTwoItems_GivenSecondItemAfterFirst(t *testing.T) } func TestMergeAscending_ReturnsSingle_GivenStartAdjacentAndBeforeEndEndEqual(t *testing.T) { - start1 := MustNewDataStoreKey("/1/p/0/k3") - end1 := MustNewDataStoreKey("/1/p/0/k6") - start2 := MustNewDataStoreKey("/1/p/0/k5") - end2 := MustNewDataStoreKey("/1/p/0/k6") + start1 := keys.MustNewDataStoreKey("/1/p/0/k3") + end1 := keys.MustNewDataStoreKey("/1/p/0/k6") + start2 := keys.MustNewDataStoreKey("/1/p/0/k5") + end2 := keys.MustNewDataStoreKey("/1/p/0/k6") input := []Span{ NewSpan(start1, end1), NewSpan(start2, end2), @@ -651,10 +653,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartAdjacentAndBeforeEndEndEqual(t * func TestMergeAscending_ReturnsSingle_GivenStartAdjacentAndBeforeEndEndAdjacentAndAfter( t *testing.T, ) { - start1 := MustNewDataStoreKey("/1/p/0/k3") - end1 := MustNewDataStoreKey("/1/p/0/k6") - start2 := MustNewDataStoreKey("/1/p/0/k5") - end2 := MustNewDataStoreKey("/1/p/0/k7") + start1 := keys.MustNewDataStoreKey("/1/p/0/k3") + end1 := keys.MustNewDataStoreKey("/1/p/0/k6") + start2 := keys.MustNewDataStoreKey("/1/p/0/k5") + end2 := keys.MustNewDataStoreKey("/1/p/0/k7") input := []Span{ NewSpan(start1, end1), NewSpan(start2, end2), @@ -668,10 +670,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartAdjacentAndBeforeEndEndAdjacentA } func TestMergeAscending_ReturnsSingle_GivenStartAdjacentAndBeforeEndEndAfter(t *testing.T) { - start1 := MustNewDataStoreKey("/1/p/0/k3") - end1 := MustNewDataStoreKey("/1/p/0/k6") - start2 := MustNewDataStoreKey("/1/p/0/k5") - end2 := MustNewDataStoreKey("/1/p/0/k8") + start1 := keys.MustNewDataStoreKey("/1/p/0/k3") + end1 := keys.MustNewDataStoreKey("/1/p/0/k6") + start2 := keys.MustNewDataStoreKey("/1/p/0/k5") + end2 := keys.MustNewDataStoreKey("/1/p/0/k8") input := []Span{ NewSpan(start1, end1), NewSpan(start2, end2), @@ -685,10 +687,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartAdjacentAndBeforeEndEndAfter(t * } func 
TestMergeAscending_ReturnsSingle_GivenStartAdjacentAndAfterEndEndAfter(t *testing.T) { - start1 := MustNewDataStoreKey("/1/p/0/k3") - end1 := MustNewDataStoreKey("/1/p/0/k6") - start2 := MustNewDataStoreKey("/1/p/0/k7") - end2 := MustNewDataStoreKey("/1/p/0/k8") + start1 := keys.MustNewDataStoreKey("/1/p/0/k3") + end1 := keys.MustNewDataStoreKey("/1/p/0/k6") + start2 := keys.MustNewDataStoreKey("/1/p/0/k7") + end2 := keys.MustNewDataStoreKey("/1/p/0/k8") input := []Span{ NewSpan(start1, end1), NewSpan(start2, end2), diff --git a/internal/core/encoding.go b/internal/core/encoding.go index 8c7930d6b9..4f190f3c67 100644 --- a/internal/core/encoding.go +++ b/internal/core/encoding.go @@ -17,7 +17,6 @@ import ( "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/internal/encoding" ) // NormalizeFieldValue takes a field value and description and converts it to the @@ -232,188 +231,3 @@ func convertToJSON(propertyName string, untypedValue any) (any, error) { return untypedValue, nil } } - -// DecodeIndexDataStoreKey decodes a IndexDataStoreKey from bytes. -// It expects the input bytes is in the following format: -// -// /[CollectionID]/[IndexID]/[FieldValue](/[FieldValue]...) -// -// Where [CollectionID] and [IndexID] are integers -// -// All values of the fields are converted to standardized Defra Go type -// according to fields description. 
-func DecodeIndexDataStoreKey( - data []byte, - indexDesc *client.IndexDescription, - fields []client.FieldDefinition, -) (IndexDataStoreKey, error) { - if len(data) == 0 { - return IndexDataStoreKey{}, ErrEmptyKey - } - - if data[0] != '/' { - return IndexDataStoreKey{}, ErrInvalidKey - } - data = data[1:] - - data, colID, err := encoding.DecodeUvarintAscending(data) - if err != nil { - return IndexDataStoreKey{}, err - } - - key := IndexDataStoreKey{CollectionID: uint32(colID)} - - if data[0] != '/' { - return IndexDataStoreKey{}, ErrInvalidKey - } - data = data[1:] - - data, indID, err := encoding.DecodeUvarintAscending(data) - if err != nil { - return IndexDataStoreKey{}, err - } - key.IndexID = uint32(indID) - - if len(data) == 0 { - return key, nil - } - - for len(data) > 0 { - if data[0] != '/' { - return IndexDataStoreKey{}, ErrInvalidKey - } - data = data[1:] - - i := len(key.Fields) - descending := false - var kind client.FieldKind = client.FieldKind_DocID - // If the key has more values encoded then fields on the index description, the last - // value must be the docID and we treat it as a string. - if i < len(indexDesc.Fields) { - descending = indexDesc.Fields[i].Descending - kind = fields[i].Kind - } else if i > len(indexDesc.Fields) { - return IndexDataStoreKey{}, ErrInvalidKey - } - - if kind != nil && kind.IsArray() { - if arrKind, ok := kind.(client.ScalarArrayKind); ok { - kind = arrKind.SubKind() - } - } - - var val client.NormalValue - data, val, err = encoding.DecodeFieldValue(data, descending, kind) - if err != nil { - return IndexDataStoreKey{}, err - } - - key.Fields = append(key.Fields, IndexedField{Value: val, Descending: descending}) - } - - return key, nil -} - -// EncodeIndexDataStoreKey encodes a IndexDataStoreKey to bytes to be stored as a key -// for secondary indexes. 
-func EncodeIndexDataStoreKey(key *IndexDataStoreKey) []byte { - if key.CollectionID == 0 { - return []byte{} - } - - b := encoding.EncodeUvarintAscending([]byte{'/'}, uint64(key.CollectionID)) - - if key.IndexID == 0 { - return b - } - b = append(b, '/') - b = encoding.EncodeUvarintAscending(b, uint64(key.IndexID)) - - for _, field := range key.Fields { - b = append(b, '/') - b = encoding.EncodeFieldValue(b, field.Value, field.Descending) - } - - return b -} - -// DecodeDataStoreKey decodes a store key into a [DataStoreKey]. -func DecodeDataStoreKey(data []byte) (DataStoreKey, error) { - if len(data) == 0 { - return DataStoreKey{}, ErrEmptyKey - } - - if data[0] != '/' { - return DataStoreKey{}, ErrInvalidKey - } - data = data[1:] - - data, colRootID, err := encoding.DecodeUvarintAscending(data) - if err != nil { - return DataStoreKey{}, err - } - - var instanceType InstanceType - if len(data) > 1 { - if data[0] == '/' { - data = data[1:] - } - instanceType = InstanceType(data[0]) - data = data[1:] - } - - const docKeyLength int = 40 - var docID string - if len(data) > docKeyLength { - if data[0] == '/' { - data = data[1:] - } - docID = string(data[:docKeyLength]) - data = data[docKeyLength:] - } - - var fieldID string - if len(data) > 1 { - if data[0] == '/' { - data = data[1:] - } - // Todo: This should be encoded/decoded properly in - // https://github.com/sourcenetwork/defradb/issues/2818 - fieldID = string(data) - } - - return DataStoreKey{ - CollectionRootID: uint32(colRootID), - InstanceType: (instanceType), - DocID: docID, - FieldID: fieldID, - }, nil -} - -// EncodeDataStoreKey encodes a [*DataStoreKey] to a byte array suitable for sorting in the store. 
-func EncodeDataStoreKey(key *DataStoreKey) []byte { - var result []byte - - if key.CollectionRootID != 0 { - result = encoding.EncodeUvarintAscending([]byte{'/'}, uint64(key.CollectionRootID)) - } - - if key.InstanceType != "" { - result = append(result, '/') - result = append(result, []byte(string(key.InstanceType))...) - } - - if key.DocID != "" { - result = append(result, '/') - result = append(result, []byte(key.DocID)...) - } - - if key.FieldID != "" { - result = append(result, '/') - // Todo: This should be encoded/decoded properly in - // https://github.com/sourcenetwork/defradb/issues/2818 - result = append(result, []byte(key.FieldID)...) - } - - return result -} diff --git a/internal/core/key.go b/internal/core/key.go deleted file mode 100644 index cec1a1a0ca..0000000000 --- a/internal/core/key.go +++ /dev/null @@ -1,1020 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package core - -import ( - "fmt" - "strconv" - "strings" - - "github.com/ipfs/go-cid" - ds "github.com/ipfs/go-datastore" - "github.com/sourcenetwork/immutable" - - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/internal/encoding" -) - -// InstanceType is a type that represents the type of instance. -type InstanceType string - -const ( - // ValueKey is a type that represents a value instance. - ValueKey = InstanceType("v") - // PriorityKey is a type that represents a priority instance. - PriorityKey = InstanceType("p") - // DeletedKey is a type that represents a deleted document. 
- DeletedKey = InstanceType("d") -) - -const ( - COLLECTION = "collection" - COLLECTION_ID = "/collection/id" - COLLECTION_NAME = "/collection/name" - COLLECTION_SCHEMA_VERSION = "/collection/version" - COLLECTION_ROOT = "/collection/root" - COLLECTION_INDEX = "/collection/index" - COLLECTION_VIEW_ITEMS = "/collection/vi" - SCHEMA_VERSION = "/schema/version/v" - SCHEMA_VERSION_ROOT = "/schema/version/r" - COLLECTION_SEQ = "/seq/collection" - INDEX_ID_SEQ = "/seq/index" - FIELD_ID_SEQ = "/seq/field" - PRIMARY_KEY = "/pk" - DATASTORE_DOC_VERSION_FIELD_ID = "v" - P2P_COLLECTION = "/p2p/collection" - REPLICATOR = "/rep/id" - REPLICATOR_RETRY_ID = "/rep/retry/id" - REPLICATOR_RETRY_DOC = "/rep/retry/doc" -) - -// Key is an interface that represents a key in the database. -type Key interface { - ToString() string - Bytes() []byte - ToDS() ds.Key -} - -// DataStoreKey is a type that represents a key in the database. -type DataStoreKey struct { - CollectionRootID uint32 - InstanceType InstanceType - DocID string - FieldID string -} - -var _ Key = (*DataStoreKey)(nil) - -// ViewCacheKey is a trimmed down [DataStoreKey] used for caching the results -// of View items. -// -// It is stored in the format `/collection/vi/[CollectionRootID]/[ItemID]`. It points to the -// full serialized View item. -type ViewCacheKey struct { - // CollectionRootID is the Root of the Collection that this item belongs to. - CollectionRootID uint32 - - // ItemID is the unique (to this CollectionRootID) ID of the View item. - // - // For now this is essentially just the index of the item in the result-set, however - // that is likely to change in the near future. - ItemID uint -} - -var _ Key = (*ViewCacheKey)(nil) - -// IndexedField contains information necessary for storing a single -// value of a field in an index. 
-type IndexedField struct { - // Value is the value of the field in the index - Value client.NormalValue - // Descending is true if the field is sorted in descending order - Descending bool -} - -// IndexDataStoreKey is key of an indexed document in the database. -type IndexDataStoreKey struct { - // CollectionID is the id of the collection - CollectionID uint32 - // IndexID is the id of the index - IndexID uint32 - // Fields is the values of the fields in the index - Fields []IndexedField -} - -var _ Key = (*IndexDataStoreKey)(nil) - -type PrimaryDataStoreKey struct { - CollectionRootID uint32 - DocID string -} - -var _ Key = (*PrimaryDataStoreKey)(nil) - -type HeadStoreKey struct { - DocID string - FieldID string //can be 'C' - Cid cid.Cid -} - -var _ Key = (*HeadStoreKey)(nil) - -// CollectionKey points to the json serialized description of the -// the collection of the given ID. -type CollectionKey struct { - CollectionID uint32 -} - -var _ Key = (*CollectionKey)(nil) - -// CollectionNameKey points to the ID of the collection of the given -// name. -type CollectionNameKey struct { - Name string -} - -var _ Key = (*CollectionNameKey)(nil) - -// CollectionSchemaVersionKey points to nil, but the keys/prefix can be used -// to get collections that are using, or have used a given schema version. -// -// If a collection is updated to a different schema version, the old entry(s) -// of this key will be preserved. -// -// This key should be removed in https://github.com/sourcenetwork/defradb/issues/1085 -type CollectionSchemaVersionKey struct { - SchemaVersionID string - CollectionID uint32 -} - -var _ Key = (*CollectionSchemaVersionKey)(nil) - -// CollectionRootKey points to nil, but the keys/prefix can be used -// to get collections that are of a given RootID. -// -// It is stored in the format `/collection/root/[RootID]/[CollectionID]`. 
-type CollectionRootKey struct { - RootID uint32 - CollectionID uint32 -} - -var _ Key = (*CollectionRootKey)(nil) - -// CollectionIndexKey to a stored description of an index -type CollectionIndexKey struct { - // CollectionID is the id of the collection that the index is on - CollectionID immutable.Option[uint32] - // IndexName is the name of the index - IndexName string -} - -var _ Key = (*CollectionIndexKey)(nil) - -// SchemaVersionKey points to the json serialized schema at the specified version. -// -// It's corresponding value is immutable. -type SchemaVersionKey struct { - SchemaVersionID string -} - -var _ Key = (*SchemaVersionKey)(nil) - -// SchemaRootKey indexes schema version ids by their root schema id. -// -// The index is the key, there are no values stored against the key. -type SchemaRootKey struct { - SchemaRoot string - SchemaVersionID string -} - -var _ Key = (*SchemaRootKey)(nil) - -type P2PCollectionKey struct { - CollectionID string -} - -var _ Key = (*P2PCollectionKey)(nil) - -// CollectionIDSequenceKey is used to key the sequence used to generate collection ids. -type CollectionIDSequenceKey struct{} - -var _ Key = (*CollectionIDSequenceKey)(nil) - -// IndexIDSequenceKey is used to key the sequence used to generate index ids. -// -// The sequence is specific to each collection version. -type IndexIDSequenceKey struct { - CollectionID uint32 -} - -var _ Key = (*IndexIDSequenceKey)(nil) - -// FieldIDSequenceKey is used to key the sequence used to generate field ids. -// -// The sequence is specific to each collection root. Multiple collection of the same root -// must maintain consistent field ids. -type FieldIDSequenceKey struct { - CollectionRoot uint32 -} - -var _ Key = (*FieldIDSequenceKey)(nil) - -type ReplicatorKey struct { - ReplicatorID string -} - -var _ Key = (*ReplicatorKey)(nil) - -// Creates a new DataStoreKey from a string as best as it can, -// splitting the input using '/' as a field deliminator. 
It assumes -// that the input string is in the following format: -// -// /[CollectionRootId]/[InstanceType]/[DocID]/[FieldId] -// -// Any properties before the above (assuming a '/' deliminator) are ignored -func NewDataStoreKey(key string) (DataStoreKey, error) { - return DecodeDataStoreKey([]byte(key)) -} - -func MustNewDataStoreKey(key string) DataStoreKey { - dsKey, err := NewDataStoreKey(key) - if err != nil { - panic(err) - } - return dsKey -} - -func DataStoreKeyFromDocID(docID client.DocID) DataStoreKey { - return DataStoreKey{ - DocID: docID.String(), - } -} - -// Creates a new HeadStoreKey from a string as best as it can, -// splitting the input using '/' as a field deliminator. It assumes -// that the input string is in the following format: -// -// /[DocID]/[FieldId]/[Cid] -// -// Any properties before the above are ignored -func NewHeadStoreKey(key string) (HeadStoreKey, error) { - elements := strings.Split(key, "/") - if len(elements) != 4 { - return HeadStoreKey{}, ErrInvalidKey - } - - cid, err := cid.Decode(elements[3]) - if err != nil { - return HeadStoreKey{}, err - } - - return HeadStoreKey{ - // elements[0] is empty (key has leading '/') - DocID: elements[1], - FieldID: elements[2], - Cid: cid, - }, nil -} - -// Returns a formatted collection key for the system data store. -// It assumes the name of the collection is non-empty. 
-func NewCollectionKey(id uint32) CollectionKey { - return CollectionKey{CollectionID: id} -} - -func NewCollectionNameKey(name string) CollectionNameKey { - return CollectionNameKey{Name: name} -} - -func NewCollectionSchemaVersionKey(schemaVersionId string, collectionID uint32) CollectionSchemaVersionKey { - return CollectionSchemaVersionKey{ - SchemaVersionID: schemaVersionId, - CollectionID: collectionID, - } -} - -func NewCollectionSchemaVersionKeyFromString(key string) (CollectionSchemaVersionKey, error) { - elements := strings.Split(key, "/") - colID, err := strconv.Atoi(elements[len(elements)-1]) - if err != nil { - return CollectionSchemaVersionKey{}, err - } - - return CollectionSchemaVersionKey{ - SchemaVersionID: elements[len(elements)-2], - CollectionID: uint32(colID), - }, nil -} - -func NewCollectionRootKey(rootID uint32, collectionID uint32) CollectionRootKey { - return CollectionRootKey{ - RootID: rootID, - CollectionID: collectionID, - } -} - -// NewCollectionRootKeyFromString creates a new [CollectionRootKey]. -// -// It expects the key to be in the format `/collection/root/[RootID]/[CollectionID]`. -func NewCollectionRootKeyFromString(key string) (CollectionRootKey, error) { - keyArr := strings.Split(key, "/") - if len(keyArr) != 5 || keyArr[1] != COLLECTION || keyArr[2] != "root" { - return CollectionRootKey{}, ErrInvalidKey - } - rootID, err := strconv.Atoi(keyArr[3]) - if err != nil { - return CollectionRootKey{}, err - } - - collectionID, err := strconv.Atoi(keyArr[4]) - if err != nil { - return CollectionRootKey{}, err - } - - return CollectionRootKey{ - RootID: uint32(rootID), - CollectionID: uint32(collectionID), - }, nil -} - -// NewCollectionIndexKey creates a new CollectionIndexKey from a collection name and index name. 
-func NewCollectionIndexKey(colID immutable.Option[uint32], indexName string) CollectionIndexKey { - return CollectionIndexKey{CollectionID: colID, IndexName: indexName} -} - -// NewCollectionIndexKeyFromString creates a new CollectionIndexKey from a string. -// It expects the input string is in the following format: -// -// /collection/index/[CollectionID]/[IndexName] -// -// Where [IndexName] might be omitted. Anything else will return an error. -func NewCollectionIndexKeyFromString(key string) (CollectionIndexKey, error) { - keyArr := strings.Split(key, "/") - if len(keyArr) < 4 || len(keyArr) > 5 || keyArr[1] != COLLECTION || keyArr[2] != "index" { - return CollectionIndexKey{}, ErrInvalidKey - } - - colID, err := strconv.Atoi(keyArr[3]) - if err != nil { - return CollectionIndexKey{}, err - } - - result := CollectionIndexKey{CollectionID: immutable.Some(uint32(colID))} - if len(keyArr) == 5 { - result.IndexName = keyArr[4] - } - return result, nil -} - -// ToString returns the string representation of the key -// It is in the following format: -// /collection/index/[CollectionID]/[IndexName] -// if [CollectionID] is empty, the rest is ignored -func (k CollectionIndexKey) ToString() string { - result := COLLECTION_INDEX - - if k.CollectionID.HasValue() { - result = result + "/" + fmt.Sprint(k.CollectionID.Value()) - if k.IndexName != "" { - result = result + "/" + k.IndexName - } - } - - return result -} - -// Bytes returns the byte representation of the key -func (k CollectionIndexKey) Bytes() []byte { - return []byte(k.ToString()) -} - -// ToDS returns the datastore key -func (k CollectionIndexKey) ToDS() ds.Key { - return ds.NewKey(k.ToString()) -} - -func NewSchemaVersionKey(schemaVersionID string) SchemaVersionKey { - return SchemaVersionKey{SchemaVersionID: schemaVersionID} -} - -func NewSchemaRootKey(schemaRoot string, schemaVersionID string) SchemaRootKey { - return SchemaRootKey{ - SchemaRoot: schemaRoot, - SchemaVersionID: schemaVersionID, - } -} - 
-func NewSchemaRootKeyFromString(keyString string) (SchemaRootKey, error) { - keyString = strings.TrimPrefix(keyString, SCHEMA_VERSION_ROOT+"/") - elements := strings.Split(keyString, "/") - if len(elements) != 2 { - return SchemaRootKey{}, ErrInvalidKey - } - - return SchemaRootKey{ - SchemaRoot: elements[0], - SchemaVersionID: elements[1], - }, nil -} - -func NewIndexIDSequenceKey(collectionID uint32) IndexIDSequenceKey { - return IndexIDSequenceKey{CollectionID: collectionID} -} - -func NewFieldIDSequenceKey(collectionRoot uint32) FieldIDSequenceKey { - return FieldIDSequenceKey{CollectionRoot: collectionRoot} -} - -func (k DataStoreKey) WithValueFlag() DataStoreKey { - newKey := k - newKey.InstanceType = ValueKey - return newKey -} - -func (k DataStoreKey) WithPriorityFlag() DataStoreKey { - newKey := k - newKey.InstanceType = PriorityKey - return newKey -} - -func (k DataStoreKey) WithDeletedFlag() DataStoreKey { - newKey := k - newKey.InstanceType = DeletedKey - return newKey -} - -func (k DataStoreKey) WithDocID(docID string) DataStoreKey { - newKey := k - newKey.DocID = docID - return newKey -} - -func (k DataStoreKey) WithInstanceInfo(key DataStoreKey) DataStoreKey { - newKey := k - newKey.DocID = key.DocID - newKey.FieldID = key.FieldID - newKey.InstanceType = key.InstanceType - return newKey -} - -func (k DataStoreKey) WithFieldID(fieldID string) DataStoreKey { - newKey := k - newKey.FieldID = fieldID - return newKey -} - -func (k DataStoreKey) ToHeadStoreKey() HeadStoreKey { - return HeadStoreKey{ - DocID: k.DocID, - FieldID: k.FieldID, - } -} - -func (k HeadStoreKey) WithDocID(docID string) HeadStoreKey { - newKey := k - newKey.DocID = docID - return newKey -} - -func (k HeadStoreKey) WithCid(c cid.Cid) HeadStoreKey { - newKey := k - newKey.Cid = c - return newKey -} - -func (k HeadStoreKey) WithFieldID(fieldID string) HeadStoreKey { - newKey := k - newKey.FieldID = fieldID - return newKey -} - -func (k DataStoreKey) ToString() string { - return 
string(k.Bytes()) -} - -func (k DataStoreKey) Bytes() []byte { - return EncodeDataStoreKey(&k) -} - -func (k DataStoreKey) ToDS() ds.Key { - return ds.NewKey(k.ToString()) -} - -func (k DataStoreKey) PrettyPrint() string { - var result string - - if k.CollectionRootID != 0 { - result = result + "/" + strconv.Itoa(int(k.CollectionRootID)) - } - if k.InstanceType != "" { - result = result + "/" + string(k.InstanceType) - } - if k.DocID != "" { - result = result + "/" + k.DocID - } - if k.FieldID != "" { - result = result + "/" + k.FieldID - } - - return result -} - -func (k DataStoreKey) Equal(other DataStoreKey) bool { - return k.CollectionRootID == other.CollectionRootID && - k.DocID == other.DocID && - k.FieldID == other.FieldID && - k.InstanceType == other.InstanceType -} - -func (k DataStoreKey) ToPrimaryDataStoreKey() PrimaryDataStoreKey { - return PrimaryDataStoreKey{ - CollectionRootID: k.CollectionRootID, - DocID: k.DocID, - } -} - -func NewViewCacheColPrefix(rootID uint32) ViewCacheKey { - return ViewCacheKey{ - CollectionRootID: rootID, - } -} - -func NewViewCacheKey(rootID uint32, itemID uint) ViewCacheKey { - return ViewCacheKey{ - CollectionRootID: rootID, - ItemID: itemID, - } -} - -func (k ViewCacheKey) ToString() string { - return string(k.Bytes()) -} - -func (k ViewCacheKey) Bytes() []byte { - result := []byte(COLLECTION_VIEW_ITEMS) - - if k.CollectionRootID != 0 { - result = append(result, '/') - result = encoding.EncodeUvarintAscending(result, uint64(k.CollectionRootID)) - } - - if k.ItemID != 0 { - result = append(result, '/') - result = encoding.EncodeUvarintAscending(result, uint64(k.ItemID)) - } - - return result -} - -func (k ViewCacheKey) ToDS() ds.Key { - return ds.NewKey(k.ToString()) -} - -func (k ViewCacheKey) PrettyPrint() string { - result := COLLECTION_VIEW_ITEMS - - if k.CollectionRootID != 0 { - result = result + "/" + strconv.Itoa(int(k.CollectionRootID)) - } - if k.ItemID != 0 { - result = result + "/" + 
strconv.Itoa(int(k.ItemID)) - } - - return result -} - -// NewIndexDataStoreKey creates a new IndexDataStoreKey from a collection ID, index ID and fields. -// It also validates values of the fields. -func NewIndexDataStoreKey(collectionID, indexID uint32, fields []IndexedField) IndexDataStoreKey { - return IndexDataStoreKey{ - CollectionID: collectionID, - IndexID: indexID, - Fields: fields, - } -} - -// Bytes returns the byte representation of the key -func (k *IndexDataStoreKey) Bytes() []byte { - return EncodeIndexDataStoreKey(k) -} - -// ToDS returns the datastore key -func (k *IndexDataStoreKey) ToDS() ds.Key { - return ds.NewKey(k.ToString()) -} - -// ToString returns the string representation of the key -// It is in the following format: -// /[CollectionID]/[IndexID]/[FieldValue](/[FieldValue]...) -// If while composing the string from left to right, a component -// is empty, the string is returned up to that point -func (k *IndexDataStoreKey) ToString() string { - return string(k.Bytes()) -} - -// Equal returns true if the two keys are equal -func (k *IndexDataStoreKey) Equal(other IndexDataStoreKey) bool { - if k.CollectionID != other.CollectionID || k.IndexID != other.IndexID { - return false - } - - if len(k.Fields) != len(other.Fields) { - return false - } - - for i, field := range k.Fields { - if !field.Value.Equal(other.Fields[i].Value) || field.Descending != other.Fields[i].Descending { - return false - } - } - - return true -} - -func (k PrimaryDataStoreKey) ToDataStoreKey() DataStoreKey { - return DataStoreKey{ - CollectionRootID: k.CollectionRootID, - DocID: k.DocID, - } -} - -func (k PrimaryDataStoreKey) Bytes() []byte { - return []byte(k.ToString()) -} - -func (k PrimaryDataStoreKey) ToDS() ds.Key { - return ds.NewKey(k.ToString()) -} - -func (k PrimaryDataStoreKey) ToString() string { - result := "" - - if k.CollectionRootID != 0 { - result = result + "/" + fmt.Sprint(k.CollectionRootID) - } - result = result + PRIMARY_KEY - if k.DocID != "" { 
- result = result + "/" + k.DocID - } - - return result -} - -func (k CollectionKey) ToString() string { - return fmt.Sprintf("%s/%s", COLLECTION_ID, strconv.Itoa(int(k.CollectionID))) -} - -func (k CollectionKey) Bytes() []byte { - return []byte(k.ToString()) -} - -func (k CollectionKey) ToDS() ds.Key { - return ds.NewKey(k.ToString()) -} - -func (k CollectionNameKey) ToString() string { - return fmt.Sprintf("%s/%s", COLLECTION_NAME, k.Name) -} - -func (k CollectionNameKey) Bytes() []byte { - return []byte(k.ToString()) -} - -func (k CollectionNameKey) ToDS() ds.Key { - return ds.NewKey(k.ToString()) -} - -func (k CollectionSchemaVersionKey) ToString() string { - result := COLLECTION_SCHEMA_VERSION - - if k.SchemaVersionID != "" { - result = result + "/" + k.SchemaVersionID - } - - if k.CollectionID != 0 { - result = fmt.Sprintf("%s/%s", result, strconv.Itoa(int(k.CollectionID))) - } - - return result -} - -func (k CollectionSchemaVersionKey) Bytes() []byte { - return []byte(k.ToString()) -} - -func (k CollectionSchemaVersionKey) ToDS() ds.Key { - return ds.NewKey(k.ToString()) -} - -func (k CollectionRootKey) ToString() string { - result := COLLECTION_ROOT - - if k.RootID != 0 { - result = fmt.Sprintf("%s/%s", result, strconv.Itoa(int(k.RootID))) - } - - if k.CollectionID != 0 { - result = fmt.Sprintf("%s/%s", result, strconv.Itoa(int(k.CollectionID))) - } - - return result -} - -func (k CollectionRootKey) Bytes() []byte { - return []byte(k.ToString()) -} - -func (k CollectionRootKey) ToDS() ds.Key { - return ds.NewKey(k.ToString()) -} - -func (k SchemaVersionKey) ToString() string { - result := SCHEMA_VERSION - - if k.SchemaVersionID != "" { - result = result + "/" + k.SchemaVersionID - } - - return result -} - -func (k SchemaVersionKey) Bytes() []byte { - return []byte(k.ToString()) -} - -func (k SchemaVersionKey) ToDS() ds.Key { - return ds.NewKey(k.ToString()) -} - -func (k SchemaRootKey) ToString() string { - result := SCHEMA_VERSION_ROOT - - if k.SchemaRoot 
!= "" { - result = result + "/" + k.SchemaRoot - } - - if k.SchemaVersionID != "" { - result = result + "/" + k.SchemaVersionID - } - - return result -} - -func (k SchemaRootKey) Bytes() []byte { - return []byte(k.ToString()) -} - -func (k SchemaRootKey) ToDS() ds.Key { - return ds.NewKey(k.ToString()) -} - -func (k CollectionIDSequenceKey) ToString() string { - return COLLECTION_SEQ -} - -func (k CollectionIDSequenceKey) Bytes() []byte { - return []byte(k.ToString()) -} - -func (k CollectionIDSequenceKey) ToDS() ds.Key { - return ds.NewKey(k.ToString()) -} - -func (k IndexIDSequenceKey) ToString() string { - return INDEX_ID_SEQ + "/" + strconv.Itoa(int(k.CollectionID)) -} - -func (k IndexIDSequenceKey) Bytes() []byte { - return []byte(k.ToString()) -} - -func (k IndexIDSequenceKey) ToDS() ds.Key { - return ds.NewKey(k.ToString()) -} - -func (k FieldIDSequenceKey) ToString() string { - return FIELD_ID_SEQ + "/" + strconv.Itoa(int(k.CollectionRoot)) -} - -func (k FieldIDSequenceKey) Bytes() []byte { - return []byte(k.ToString()) -} - -func (k FieldIDSequenceKey) ToDS() ds.Key { - return ds.NewKey(k.ToString()) -} - -// New -func NewP2PCollectionKey(collectionID string) P2PCollectionKey { - return P2PCollectionKey{CollectionID: collectionID} -} - -func NewP2PCollectionKeyFromString(key string) (P2PCollectionKey, error) { - keyArr := strings.Split(key, "/") - if len(keyArr) != 4 { - return P2PCollectionKey{}, errors.WithStack(ErrInvalidKey, errors.NewKV("Key", key)) - } - return NewP2PCollectionKey(keyArr[3]), nil -} - -func (k P2PCollectionKey) ToString() string { - result := P2P_COLLECTION - - if k.CollectionID != "" { - result = result + "/" + k.CollectionID - } - - return result -} - -func (k P2PCollectionKey) Bytes() []byte { - return []byte(k.ToString()) -} - -func (k P2PCollectionKey) ToDS() ds.Key { - return ds.NewKey(k.ToString()) -} - -func NewReplicatorKey(id string) ReplicatorKey { - return ReplicatorKey{ReplicatorID: id} -} - -func (k ReplicatorKey) 
ToString() string { - result := REPLICATOR - - if k.ReplicatorID != "" { - result = result + "/" + k.ReplicatorID - } - - return result -} - -func (k ReplicatorKey) Bytes() []byte { - return []byte(k.ToString()) -} - -func (k ReplicatorKey) ToDS() ds.Key { - return ds.NewKey(k.ToString()) -} - -func (k HeadStoreKey) ToString() string { - var result string - - if k.DocID != "" { - result = result + "/" + k.DocID - } - if k.FieldID != "" { - result = result + "/" + k.FieldID - } - if k.Cid.Defined() { - result = result + "/" + k.Cid.String() - } - - return result -} - -func (k HeadStoreKey) Bytes() []byte { - return []byte(k.ToString()) -} - -func (k HeadStoreKey) ToDS() ds.Key { - return ds.NewKey(k.ToString()) -} - -// PrefixEnd determines the end key given key as a prefix, that is the key that sorts precisely -// behind all keys starting with prefix: "1" is added to the final byte and the carry propagated. -// The special cases of nil and KeyMin always returns KeyMax. -func (k DataStoreKey) PrefixEnd() DataStoreKey { - newKey := k - - if k.FieldID != "" { - newKey.FieldID = string(bytesPrefixEnd([]byte(k.FieldID))) - return newKey - } - if k.DocID != "" { - newKey.DocID = string(bytesPrefixEnd([]byte(k.DocID))) - return newKey - } - if k.InstanceType != "" { - newKey.InstanceType = InstanceType(bytesPrefixEnd([]byte(k.InstanceType))) - return newKey - } - if k.CollectionRootID != 0 { - newKey.CollectionRootID = k.CollectionRootID + 1 - return newKey - } - - return newKey -} - -// FieldIDAsUint extracts the Field Identifier from the Key. -// In a Primary index, the last key path is the FieldIDAsUint. -// This may be different in Secondary Indexes. -// An error is returned if it can't correct convert the field to a uint32. 
-func (k DataStoreKey) FieldIDAsUint() (uint32, error) { - fieldID, err := strconv.Atoi(k.FieldID) - if err != nil { - return 0, NewErrFailedToGetFieldIdOfKey(err) - } - return uint32(fieldID), nil -} - -func bytesPrefixEnd(b []byte) []byte { - end := make([]byte, len(b)) - copy(end, b) - for i := len(end) - 1; i >= 0; i-- { - end[i] = end[i] + 1 - if end[i] != 0 { - return end[:i+1] - } - } - // This statement will only be reached if the key is already a - // maximal byte string (i.e. already \xff...). - return b -} - -type ReplicatorRetryIDKey struct { - PeerID string -} - -var _ Key = (*ReplicatorRetryIDKey)(nil) - -func NewReplicatorRetryIDKey(peerID string) ReplicatorRetryIDKey { - return ReplicatorRetryIDKey{ - PeerID: peerID, - } -} - -// NewReplicatorRetryIDKeyFromString creates a new [ReplicatorRetryIDKey] from a string. -// -// It expects the input string to be in the format `/rep/retry/id/[PeerID]`. -func NewReplicatorRetryIDKeyFromString(key string) (ReplicatorRetryIDKey, error) { - peerID := strings.TrimPrefix(key, REPLICATOR_RETRY_ID+"/") - if peerID == "" { - return ReplicatorRetryIDKey{}, errors.WithStack(ErrInvalidKey, errors.NewKV("Key", key)) - } - return NewReplicatorRetryIDKey(peerID), nil -} - -func (k ReplicatorRetryIDKey) ToString() string { - return REPLICATOR_RETRY_ID + "/" + k.PeerID -} - -func (k ReplicatorRetryIDKey) Bytes() []byte { - return []byte(k.ToString()) -} - -func (k ReplicatorRetryIDKey) ToDS() ds.Key { - return ds.NewKey(k.ToString()) -} - -type ReplicatorRetryDocIDKey struct { - PeerID string - DocID string -} - -var _ Key = (*ReplicatorRetryDocIDKey)(nil) - -func NewReplicatorRetryDocIDKey(peerID, docID string) ReplicatorRetryDocIDKey { - return ReplicatorRetryDocIDKey{ - PeerID: peerID, - DocID: docID, - } -} - -// NewReplicatorRetryDocIDKeyFromString creates a new [ReplicatorRetryDocIDKey] from a string. -// -// It expects the input string to be in the format `/rep/retry/doc/[PeerID]/[DocID]`. 
-func NewReplicatorRetryDocIDKeyFromString(key string) (ReplicatorRetryDocIDKey, error) { - trimmedKey := strings.TrimPrefix(key, REPLICATOR_RETRY_DOC+"/") - keyArr := strings.Split(trimmedKey, "/") - if len(keyArr) != 2 { - return ReplicatorRetryDocIDKey{}, errors.WithStack(ErrInvalidKey, errors.NewKV("Key", key)) - } - return NewReplicatorRetryDocIDKey(keyArr[0], keyArr[1]), nil -} - -func (k ReplicatorRetryDocIDKey) ToString() string { - keyString := REPLICATOR_RETRY_DOC + "/" + k.PeerID - if k.DocID != "" { - keyString += "/" + k.DocID - } - return keyString -} - -func (k ReplicatorRetryDocIDKey) Bytes() []byte { - return []byte(k.ToString()) -} - -func (k ReplicatorRetryDocIDKey) ToDS() ds.Key { - return ds.NewKey(k.ToString()) -} diff --git a/internal/db/base/collection_keys.go b/internal/db/base/collection_keys.go index 8878d50b13..31cdeef18c 100644 --- a/internal/db/base/collection_keys.go +++ b/internal/db/base/collection_keys.go @@ -15,11 +15,12 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/keys" ) // MakeDataStoreKeyWithCollectionDescription returns the datastore key for the given collection description. 
-func MakeDataStoreKeyWithCollectionDescription(col client.CollectionDescription) core.DataStoreKey { - return core.DataStoreKey{ +func MakeDataStoreKeyWithCollectionDescription(col client.CollectionDescription) keys.DataStoreKey { + return keys.DataStoreKey{ CollectionRootID: col.RootID, } } @@ -28,8 +29,8 @@ func MakeDataStoreKeyWithCollectionDescription(col client.CollectionDescription) func MakeDataStoreKeyWithCollectionAndDocID( col client.CollectionDescription, docID string, -) core.DataStoreKey { - return core.DataStoreKey{ +) keys.DataStoreKey { + return keys.DataStoreKey{ CollectionRootID: col.RootID, DocID: docID, } @@ -38,9 +39,9 @@ func MakeDataStoreKeyWithCollectionAndDocID( func MakePrimaryIndexKeyForCRDT( c client.CollectionDefinition, ctype client.CType, - key core.DataStoreKey, + key keys.DataStoreKey, fieldName string, -) (core.DataStoreKey, error) { +) (keys.DataStoreKey, error) { switch ctype { case client.COMPOSITE: return MakeDataStoreKeyWithCollectionDescription(c.Description). @@ -50,7 +51,7 @@ func MakePrimaryIndexKeyForCRDT( case client.LWW_REGISTER, client.PN_COUNTER, client.P_COUNTER: field, ok := c.GetFieldByName(fieldName) if !ok { - return core.DataStoreKey{}, client.NewErrFieldNotExist(fieldName) + return keys.DataStoreKey{}, client.NewErrFieldNotExist(fieldName) } return MakeDataStoreKeyWithCollectionDescription(c.Description). 
@@ -58,5 +59,5 @@ func MakePrimaryIndexKeyForCRDT( WithFieldID(fmt.Sprint(field.ID)), nil } - return core.DataStoreKey{}, ErrInvalidCrdtType + return keys.DataStoreKey{}, ErrInvalidCrdtType } diff --git a/internal/db/collection.go b/internal/db/collection.go index 39e8757598..af631701fc 100644 --- a/internal/db/collection.go +++ b/internal/db/collection.go @@ -34,6 +34,7 @@ import ( "github.com/sourcenetwork/defradb/internal/db/description" "github.com/sourcenetwork/defradb/internal/db/fetcher" "github.com/sourcenetwork/defradb/internal/encryption" + "github.com/sourcenetwork/defradb/internal/keys" "github.com/sourcenetwork/defradb/internal/lens" merklecrdt "github.com/sourcenetwork/defradb/internal/merkle/crdt" ) @@ -280,7 +281,7 @@ func (c *collection) getAllDocIDsChan( ctx context.Context, ) (<-chan client.DocIDResult, error) { txn := mustGetContextTxn(ctx) - prefix := core.PrimaryDataStoreKey{ // empty path for all keys prefix + prefix := keys.PrimaryDataStoreKey{ // empty path for all keys prefix CollectionRootID: c.Description().RootID, } q, err := txn.Datastore().Query(ctx, query.Query{ @@ -420,15 +421,15 @@ func (c *collection) CreateMany( func (c *collection) getDocIDAndPrimaryKeyFromDoc( doc *client.Document, -) (client.DocID, core.PrimaryDataStoreKey, error) { +) (client.DocID, keys.PrimaryDataStoreKey, error) { docID, err := doc.GenerateDocID() if err != nil { - return client.DocID{}, core.PrimaryDataStoreKey{}, err + return client.DocID{}, keys.PrimaryDataStoreKey{}, err } primaryKey := c.getPrimaryKeyFromDocID(docID) if primaryKey.DocID != doc.ID().String() { - return client.DocID{}, core.PrimaryDataStoreKey{}, + return client.DocID{}, keys.PrimaryDataStoreKey{}, NewErrDocVerification(doc.ID().String(), primaryKey.DocID) } return docID, primaryKey, nil @@ -667,7 +668,7 @@ func (c *collection) save( merkleCRDT, err := merklecrdt.InstanceWithStore( txn, - core.NewCollectionSchemaVersionKey(c.Schema().VersionID, c.ID()), + 
keys.NewCollectionSchemaVersionKey(c.Schema().VersionID, c.ID()), val.Type(), fieldDescription.Kind, fieldKey, @@ -860,7 +861,7 @@ func (c *collection) Exists( // check if a document exists with the given primary key func (c *collection) exists( ctx context.Context, - primaryKey core.PrimaryDataStoreKey, + primaryKey keys.PrimaryDataStoreKey, ) (exists bool, isDeleted bool, err error) { canRead, err := c.checkAccessOfDocWithACP( ctx, @@ -894,7 +895,7 @@ func (c *collection) exists( // Calling it elsewhere could cause the omission of acp checks. func (c *collection) saveCompositeToMerkleCRDT( ctx context.Context, - dsKey core.DataStoreKey, + dsKey keys.DataStoreKey, links []coreblock.DAGLink, status client.DocumentStatus, ) (cidlink.Link, []byte, error) { @@ -902,7 +903,7 @@ func (c *collection) saveCompositeToMerkleCRDT( dsKey = dsKey.WithFieldID(core.COMPOSITE_NAMESPACE) merkleCRDT := merklecrdt.NewMerkleCompositeDAG( txn, - core.NewCollectionSchemaVersionKey(c.Schema().VersionID, c.ID()), + keys.NewCollectionSchemaVersionKey(c.Schema().VersionID, c.ID()), dsKey, ) @@ -913,28 +914,28 @@ func (c *collection) saveCompositeToMerkleCRDT( return merkleCRDT.Save(ctx, links) } -func (c *collection) getPrimaryKeyFromDocID(docID client.DocID) core.PrimaryDataStoreKey { - return core.PrimaryDataStoreKey{ +func (c *collection) getPrimaryKeyFromDocID(docID client.DocID) keys.PrimaryDataStoreKey { + return keys.PrimaryDataStoreKey{ CollectionRootID: c.Description().RootID, DocID: docID.String(), } } -func (c *collection) getDataStoreKeyFromDocID(docID client.DocID) core.DataStoreKey { - return core.DataStoreKey{ +func (c *collection) getDataStoreKeyFromDocID(docID client.DocID) keys.DataStoreKey { + return keys.DataStoreKey{ CollectionRootID: c.Description().RootID, DocID: docID.String(), - InstanceType: core.ValueKey, + InstanceType: keys.ValueKey, } } -func (c *collection) tryGetFieldKey(primaryKey core.PrimaryDataStoreKey, fieldName string) (core.DataStoreKey, bool) { +func 
(c *collection) tryGetFieldKey(primaryKey keys.PrimaryDataStoreKey, fieldName string) (keys.DataStoreKey, bool) { fieldID, hasField := c.tryGetFieldID(fieldName) if !hasField { - return core.DataStoreKey{}, false + return keys.DataStoreKey{}, false } - return core.DataStoreKey{ + return keys.DataStoreKey{ CollectionRootID: c.Description().RootID, DocID: primaryKey.DocID, FieldID: strconv.FormatUint(uint64(fieldID), 10), diff --git a/internal/db/collection_delete.go b/internal/db/collection_delete.go index 62d7c24e50..468095b54c 100644 --- a/internal/db/collection_delete.go +++ b/internal/db/collection_delete.go @@ -16,8 +16,8 @@ import ( "github.com/sourcenetwork/defradb/acp" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/event" - "github.com/sourcenetwork/defradb/internal/core" coreblock "github.com/sourcenetwork/defradb/internal/core/block" + "github.com/sourcenetwork/defradb/internal/keys" ) // DeleteWithFilter deletes using a filter to target documents for delete. @@ -87,7 +87,7 @@ func (c *collection) deleteWithFilter( // Extract the docID in the string format from the document value. docID := doc.GetID() - primaryKey := core.PrimaryDataStoreKey{ + primaryKey := keys.PrimaryDataStoreKey{ CollectionRootID: c.Description().RootID, DocID: docID, } @@ -109,7 +109,7 @@ func (c *collection) deleteWithFilter( func (c *collection) applyDelete( ctx context.Context, - primaryKey core.PrimaryDataStoreKey, + primaryKey keys.PrimaryDataStoreKey, ) error { // Must also have read permission to delete, inorder to check if document exists. 
found, isDeleted, err := c.exists(ctx, primaryKey) diff --git a/internal/db/collection_get.go b/internal/db/collection_get.go index 05e6d43308..f2db5f0f8c 100644 --- a/internal/db/collection_get.go +++ b/internal/db/collection_get.go @@ -18,6 +18,7 @@ import ( "github.com/sourcenetwork/defradb/internal/core" "github.com/sourcenetwork/defradb/internal/db/base" "github.com/sourcenetwork/defradb/internal/db/fetcher" + "github.com/sourcenetwork/defradb/internal/keys" ) func (c *collection) Get( @@ -55,7 +56,7 @@ func (c *collection) Get( func (c *collection) get( ctx context.Context, - primaryKey core.PrimaryDataStoreKey, + primaryKey keys.PrimaryDataStoreKey, fields []client.FieldDefinition, showDeleted bool, ) (*client.Document, error) { diff --git a/internal/db/collection_id.go b/internal/db/collection_id.go index e635a4477f..84edcbb1c4 100644 --- a/internal/db/collection_id.go +++ b/internal/db/collection_id.go @@ -17,7 +17,7 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" - "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/keys" ) // setCollectionIDs sets the IDs on a collection description, including field IDs, mutating the input set. @@ -33,7 +33,7 @@ func (db *db) setCollectionIDs(ctx context.Context, newCollections []client.Coll // setCollectionID sets the IDs directly on a collection description, excluding stuff like field IDs, // mutating the input set. 
func (db *db) setCollectionID(ctx context.Context, newCollections []client.CollectionDefinition) error { - colSeq, err := db.getSequence(ctx, core.CollectionIDSequenceKey{}) + colSeq, err := db.getSequence(ctx, keys.CollectionIDSequenceKey{}) if err != nil { return err } @@ -75,7 +75,7 @@ func (db *db) setFieldIDs(ctx context.Context, definitions []client.CollectionDe } for i := range definitions { - fieldSeq, err := db.getSequence(ctx, core.NewFieldIDSequenceKey(definitions[i].Description.RootID)) + fieldSeq, err := db.getSequence(ctx, keys.NewFieldIDSequenceKey(definitions[i].Description.RootID)) if err != nil { return err } diff --git a/internal/db/collection_index.go b/internal/db/collection_index.go index eb2b1b8d4c..b1baad8369 100644 --- a/internal/db/collection_index.go +++ b/internal/db/collection_index.go @@ -28,6 +28,7 @@ import ( "github.com/sourcenetwork/defradb/internal/db/base" "github.com/sourcenetwork/defradb/internal/db/description" "github.com/sourcenetwork/defradb/internal/db/fetcher" + "github.com/sourcenetwork/defradb/internal/keys" "github.com/sourcenetwork/defradb/internal/request/graphql/schema" ) @@ -61,9 +62,9 @@ func (db *db) getAllIndexDescriptions( ) (map[client.CollectionName][]client.IndexDescription, error) { // callers of this function must set a context transaction txn := mustGetContextTxn(ctx) - prefix := core.NewCollectionIndexKey(immutable.None[uint32](), "") + prefix := keys.NewCollectionIndexKey(immutable.None[uint32](), "") - keys, indexDescriptions, err := datastore.DeserializePrefix[client.IndexDescription](ctx, + indexKeys, indexDescriptions, err := datastore.DeserializePrefix[client.IndexDescription](ctx, prefix.ToString(), txn.Systemstore()) if err != nil { @@ -72,8 +73,8 @@ func (db *db) getAllIndexDescriptions( indexes := make(map[client.CollectionName][]client.IndexDescription) - for i := range keys { - indexKey, err := core.NewCollectionIndexKeyFromString(keys[i]) + for i := range indexKeys { + indexKey, err := 
keys.NewCollectionIndexKeyFromString(indexKeys[i]) if err != nil { return nil, NewErrInvalidStoredIndexKey(indexKey.ToString()) } @@ -98,7 +99,7 @@ func (db *db) fetchCollectionIndexDescriptions( ) ([]client.IndexDescription, error) { // callers of this function must set a context transaction txn := mustGetContextTxn(ctx) - prefix := core.NewCollectionIndexKey(immutable.Some(colID), "") + prefix := keys.NewCollectionIndexKey(immutable.Some(colID), "") _, indexDescriptions, err := datastore.DeserializePrefix[client.IndexDescription]( ctx, prefix.ToString(), @@ -257,7 +258,7 @@ func (c *collection) createIndex( colSeq, err := c.db.getSequence( ctx, - core.NewIndexIDSequenceKey(c.ID()), + keys.NewIndexIDSequenceKey(c.ID()), ) if err != nil { return nil, err @@ -411,7 +412,7 @@ func (c *collection) dropIndex(ctx context.Context, indexName string) error { break } } - key := core.NewCollectionIndexKey(immutable.Some(c.ID()), indexName) + key := keys.NewCollectionIndexKey(immutable.Some(c.ID()), indexName) err = txn.Systemstore().Delete(ctx, key.ToDS()) if err != nil { return err @@ -423,7 +424,7 @@ func (c *collection) dropIndex(ctx context.Context, indexName string) error { func (c *collection) dropAllIndexes(ctx context.Context) error { // callers of this function must set a context transaction txn := mustGetContextTxn(ctx) - prefix := core.NewCollectionIndexKey(immutable.Some(c.ID()), "") + prefix := keys.NewCollectionIndexKey(immutable.Some(c.ID()), "") keys, err := datastore.FetchKeysForPrefix(ctx, prefix.ToString(), txn.Systemstore()) if err != nil { @@ -494,19 +495,19 @@ func (c *collection) checkExistingFieldsAndAdjustRelFieldNames( func (c *collection) generateIndexNameIfNeededAndCreateKey( ctx context.Context, desc *client.IndexDescription, -) (core.CollectionIndexKey, error) { +) (keys.CollectionIndexKey, error) { // callers of this function must set a context transaction txn := mustGetContextTxn(ctx) - var indexKey core.CollectionIndexKey + var indexKey 
keys.CollectionIndexKey if desc.Name == "" { nameIncrement := 1 for { desc.Name = generateIndexName(c, desc.Fields, nameIncrement) - indexKey = core.NewCollectionIndexKey(immutable.Some(c.ID()), desc.Name) + indexKey = keys.NewCollectionIndexKey(immutable.Some(c.ID()), desc.Name) exists, err := txn.Systemstore().Has(ctx, indexKey.ToDS()) if err != nil { - return core.CollectionIndexKey{}, err + return keys.CollectionIndexKey{}, err } if !exists { break @@ -514,13 +515,13 @@ func (c *collection) generateIndexNameIfNeededAndCreateKey( nameIncrement++ } } else { - indexKey = core.NewCollectionIndexKey(immutable.Some(c.ID()), desc.Name) + indexKey = keys.NewCollectionIndexKey(immutable.Some(c.ID()), desc.Name) exists, err := txn.Systemstore().Has(ctx, indexKey.ToDS()) if err != nil { - return core.CollectionIndexKey{}, err + return keys.CollectionIndexKey{}, err } if exists { - return core.CollectionIndexKey{}, NewErrIndexWithNameAlreadyExists(desc.Name) + return keys.CollectionIndexKey{}, NewErrIndexWithNameAlreadyExists(desc.Name) } } return indexKey, nil diff --git a/internal/db/db.go b/internal/db/db.go index 2e5363b94b..f2782bbe3a 100644 --- a/internal/db/db.go +++ b/internal/db/db.go @@ -34,6 +34,7 @@ import ( "github.com/sourcenetwork/defradb/event" "github.com/sourcenetwork/defradb/internal/core" "github.com/sourcenetwork/defradb/internal/db/permission" + "github.com/sourcenetwork/defradb/internal/keys" "github.com/sourcenetwork/defradb/internal/request/graphql" ) @@ -354,7 +355,7 @@ func (db *db) initialize(ctx context.Context) error { // init meta data // collection sequence - _, err = db.getSequence(ctx, core.CollectionIDSequenceKey{}) + _, err = db.getSequence(ctx, keys.CollectionIDSequenceKey{}) if err != nil { return err } diff --git a/internal/db/description/collection.go b/internal/db/description/collection.go index 20f652888e..1c8c4667ef 100644 --- a/internal/db/description/collection.go +++ b/internal/db/description/collection.go @@ -21,7 +21,7 @@ 
import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/keys" ) // SaveCollection saves the given collection to the system store overwriting any @@ -41,14 +41,14 @@ func SaveCollection( return client.CollectionDescription{}, err } - key := core.NewCollectionKey(desc.ID) + key := keys.NewCollectionKey(desc.ID) err = txn.Systemstore().Put(ctx, key.ToDS(), buf) if err != nil { return client.CollectionDescription{}, err } if existing.Name.HasValue() && existing.Name != desc.Name { - nameKey := core.NewCollectionNameKey(existing.Name.Value()) + nameKey := keys.NewCollectionNameKey(existing.Name.Value()) idBuf, err := txn.Systemstore().Get(ctx, nameKey.ToDS()) nameIndexExsts := true if err != nil { @@ -82,7 +82,7 @@ func SaveCollection( return client.CollectionDescription{}, err } - nameKey := core.NewCollectionNameKey(desc.Name.Value()) + nameKey := keys.NewCollectionNameKey(desc.Name.Value()) err = txn.Systemstore().Put(ctx, nameKey.ToDS(), idBuf) if err != nil { return client.CollectionDescription{}, err @@ -91,13 +91,13 @@ func SaveCollection( // The need for this key is temporary, we should replace it with the global collection ID // https://github.com/sourcenetwork/defradb/issues/1085 - schemaVersionKey := core.NewCollectionSchemaVersionKey(desc.SchemaVersionID, desc.ID) + schemaVersionKey := keys.NewCollectionSchemaVersionKey(desc.SchemaVersionID, desc.ID) err = txn.Systemstore().Put(ctx, schemaVersionKey.ToDS(), []byte{}) if err != nil { return client.CollectionDescription{}, err } - rootKey := core.NewCollectionRootKey(desc.RootID, desc.ID) + rootKey := keys.NewCollectionRootKey(desc.RootID, desc.ID) err = txn.Systemstore().Put(ctx, rootKey.ToDS(), []byte{}) if err != nil { return client.CollectionDescription{}, err @@ -111,7 +111,7 @@ func GetCollectionByID( txn datastore.Txn, id uint32, ) (client.CollectionDescription, error) 
{ - key := core.NewCollectionKey(id) + key := keys.NewCollectionKey(id) buf, err := txn.Systemstore().Get(ctx, key.ToDS()) if err != nil { return client.CollectionDescription{}, err @@ -134,7 +134,7 @@ func GetCollectionByName( txn datastore.Txn, name string, ) (client.CollectionDescription, error) { - nameKey := core.NewCollectionNameKey(name) + nameKey := keys.NewCollectionNameKey(name) idBuf, err := txn.Systemstore().Get(ctx, nameKey.ToDS()) if err != nil { return client.CollectionDescription{}, err @@ -154,7 +154,7 @@ func GetCollectionsByRoot( txn datastore.Txn, root uint32, ) ([]client.CollectionDescription, error) { - rootKey := core.NewCollectionRootKey(root, 0) + rootKey := keys.NewCollectionRootKey(root, 0) rootQuery, err := txn.Systemstore().Query(ctx, query.Query{ Prefix: rootKey.ToString(), @@ -173,7 +173,7 @@ func GetCollectionsByRoot( return nil, err } - rootKey, err := core.NewCollectionRootKeyFromString(string(res.Key)) + rootKey, err := keys.NewCollectionRootKeyFromString(string(res.Key)) if err != nil { if err := rootQuery.Close(); err != nil { return nil, NewErrFailedToCloseSchemaQuery(err) @@ -201,7 +201,7 @@ func GetCollectionsBySchemaVersionID( txn datastore.Txn, schemaVersionID string, ) ([]client.CollectionDescription, error) { - schemaVersionKey := core.NewCollectionSchemaVersionKey(schemaVersionID, 0) + schemaVersionKey := keys.NewCollectionSchemaVersionKey(schemaVersionID, 0) schemaVersionQuery, err := txn.Systemstore().Query(ctx, query.Query{ Prefix: schemaVersionKey.ToString(), @@ -220,7 +220,7 @@ func GetCollectionsBySchemaVersionID( return nil, err } - colSchemaVersionKey, err := core.NewCollectionSchemaVersionKeyFromString(string(res.Key)) + colSchemaVersionKey, err := keys.NewCollectionSchemaVersionKeyFromString(string(res.Key)) if err != nil { if err := schemaVersionQuery.Close(); err != nil { return nil, NewErrFailedToCloseSchemaQuery(err) @@ -233,7 +233,7 @@ func GetCollectionsBySchemaVersionID( cols := 
make([]client.CollectionDescription, len(colIDs)) for i, colID := range colIDs { - key := core.NewCollectionKey(colID) + key := keys.NewCollectionKey(colID) buf, err := txn.Systemstore().Get(ctx, key.ToDS()) if err != nil { return nil, err @@ -286,7 +286,7 @@ func GetCollections( txn datastore.Txn, ) ([]client.CollectionDescription, error) { q, err := txn.Systemstore().Query(ctx, query.Query{ - Prefix: core.COLLECTION_ID, + Prefix: keys.COLLECTION_ID, }) if err != nil { return nil, NewErrFailedToCreateCollectionQuery(err) @@ -322,7 +322,7 @@ func GetActiveCollections( txn datastore.Txn, ) ([]client.CollectionDescription, error) { q, err := txn.Systemstore().Query(ctx, query.Query{ - Prefix: core.NewCollectionNameKey("").ToString(), + Prefix: keys.NewCollectionNameKey("").ToString(), }) if err != nil { return nil, NewErrFailedToCreateCollectionQuery(err) @@ -364,6 +364,6 @@ func HasCollectionByName( txn datastore.Txn, name string, ) (bool, error) { - nameKey := core.NewCollectionNameKey(name) + nameKey := keys.NewCollectionNameKey(name) return txn.Systemstore().Has(ctx, nameKey.ToDS()) } diff --git a/internal/db/description/schema.go b/internal/db/description/schema.go index f9d5935770..3df17f7e1e 100644 --- a/internal/db/description/schema.go +++ b/internal/db/description/schema.go @@ -18,7 +18,7 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/keys" ) // CreateSchemaVersion creates and saves to the store a new schema version. 
@@ -34,7 +34,7 @@ func CreateSchemaVersion( return client.SchemaDescription{}, err } - key := core.NewSchemaVersionKey(desc.VersionID) + key := keys.NewSchemaVersionKey(desc.VersionID) err = txn.Systemstore().Put(ctx, key.ToDS(), buf) if err != nil { return client.SchemaDescription{}, err @@ -43,7 +43,7 @@ func CreateSchemaVersion( isNew := desc.Root == desc.VersionID if !isNew { // We don't need to add a root key if this is the first version - schemaVersionHistoryKey := core.NewSchemaRootKey(desc.Root, desc.VersionID) + schemaVersionHistoryKey := keys.NewSchemaRootKey(desc.Root, desc.VersionID) err = txn.Systemstore().Put(ctx, schemaVersionHistoryKey.ToDS(), []byte{}) if err != nil { return client.SchemaDescription{}, err @@ -62,7 +62,7 @@ func GetSchemaVersion( txn datastore.Txn, versionID string, ) (client.SchemaDescription, error) { - key := core.NewSchemaVersionKey(versionID) + key := keys.NewSchemaVersionKey(versionID) buf, err := txn.Systemstore().Get(ctx, key.ToDS()) if err != nil { @@ -135,7 +135,7 @@ func GetSchemas( versionIDs = append(versionIDs, col.SchemaVersionID) } - schemaVersionPrefix := core.NewSchemaVersionKey("") + schemaVersionPrefix := keys.NewSchemaVersionKey("") schemaVersionQuery, err := txn.Systemstore().Query(ctx, query.Query{ Prefix: schemaVersionPrefix.ToString(), }) @@ -181,7 +181,7 @@ func GetAllSchemas( ctx context.Context, txn datastore.Txn, ) ([]client.SchemaDescription, error) { - prefix := core.NewSchemaVersionKey("") + prefix := keys.NewSchemaVersionKey("") q, err := txn.Systemstore().Query(ctx, query.Query{ Prefix: prefix.ToString(), }) @@ -226,7 +226,7 @@ func GetSchemaVersionIDs( // It is not present in the history prefix. 
schemaVersions := []string{schemaRoot} - prefix := core.NewSchemaRootKey(schemaRoot, "") + prefix := keys.NewSchemaRootKey(schemaRoot, "") q, err := txn.Systemstore().Query(ctx, query.Query{ Prefix: prefix.ToString(), KeysOnly: true, @@ -243,7 +243,7 @@ func GetSchemaVersionIDs( return nil, err } - key, err := core.NewSchemaRootKeyFromString(res.Key) + key, err := keys.NewSchemaRootKeyFromString(res.Key) if err != nil { return nil, err } diff --git a/internal/db/fetcher/dag.go b/internal/db/fetcher/dag.go index 3d3a6dd85e..395354fc08 100644 --- a/internal/db/fetcher/dag.go +++ b/internal/db/fetcher/dag.go @@ -21,6 +21,7 @@ import ( "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/keys" ) // HeadFetcher is a utility to incrementally fetch all the MerkleCRDT heads of a given doc/field. @@ -40,8 +41,8 @@ func (hf *HeadFetcher) Start( if len(spans.Value) == 0 { spans = core.NewSpans( core.NewSpan( - core.DataStoreKey{}, - core.DataStoreKey{}.PrefixEnd(), + keys.DataStoreKey{}, + keys.DataStoreKey{}.PrefixEnd(), ), ) } @@ -87,7 +88,7 @@ func (hf *HeadFetcher) FetchNext() (*cid.Cid, error) { return nil, nil } - headStoreKey, err := core.NewHeadStoreKey(res.Key) + headStoreKey, err := keys.NewHeadStoreKey(res.Key) if err != nil { return nil, err } diff --git a/internal/db/fetcher/fetcher.go b/internal/db/fetcher/fetcher.go index 06e3255e8c..62a03a4d17 100644 --- a/internal/db/fetcher/fetcher.go +++ b/internal/db/fetcher/fetcher.go @@ -28,6 +28,7 @@ import ( "github.com/sourcenetwork/defradb/internal/core" "github.com/sourcenetwork/defradb/internal/db/base" "github.com/sourcenetwork/defradb/internal/db/permission" + "github.com/sourcenetwork/defradb/internal/keys" "github.com/sourcenetwork/defradb/internal/planner/mapper" "github.com/sourcenetwork/defradb/internal/request/graphql/parser" ) @@ -78,7 +79,7 @@ type Fetcher interface { // keyValue is a KV store response containing the 
resulting core.Key and byte array value. type keyValue struct { - Key core.DataStoreKey + Key keys.DataStoreKey Value []byte } @@ -366,7 +367,7 @@ func (df *DocumentFetcher) nextKey(ctx context.Context, seekNext bool) (spanDone } } - if df.kv != nil && (df.kv.Key.InstanceType != core.ValueKey && df.kv.Key.InstanceType != core.DeletedKey) { + if df.kv != nil && (df.kv.Key.InstanceType != keys.ValueKey && df.kv.Key.InstanceType != keys.DeletedKey) { // We can only ready value values, if we escape the collection's value keys // then we must be done and can stop reading spanDone = true @@ -450,19 +451,19 @@ func (df *DocumentFetcher) seekKV(key string) (bool, *keyValue, error) { // - It directly interacts with the KVIterator. // - Returns true if the entire iterator/span is exhausted // - Returns a kv pair instead of internally updating -func (df *DocumentFetcher) nextKVRaw() (bool, core.DataStoreKey, dsq.Result, error) { +func (df *DocumentFetcher) nextKVRaw() (bool, keys.DataStoreKey, dsq.Result, error) { res, available := df.kvResultsIter.NextSync() if !available { - return true, core.DataStoreKey{}, res, nil + return true, keys.DataStoreKey{}, res, nil } err := res.Error if err != nil { - return true, core.DataStoreKey{}, res, err + return true, keys.DataStoreKey{}, res, err } - dsKey, err := core.NewDataStoreKey(res.Key) + dsKey, err := keys.NewDataStoreKey(res.Key) if err != nil { - return true, core.DataStoreKey{}, res, err + return true, keys.DataStoreKey{}, res, err } return false, dsKey, res, nil @@ -504,7 +505,7 @@ func (df *DocumentFetcher) processKV(kv *keyValue) error { } } - if kv.Key.FieldID == core.DATASTORE_DOC_VERSION_FIELD_ID { + if kv.Key.FieldID == keys.DATASTORE_DOC_VERSION_FIELD_ID { df.doc.schemaVersionID = string(kv.Value) return nil } diff --git a/internal/db/fetcher/indexer_iterators.go b/internal/db/fetcher/indexer_iterators.go index ecf964185d..5e3671d3aa 100644 --- a/internal/db/fetcher/indexer_iterators.go +++ 
b/internal/db/fetcher/indexer_iterators.go @@ -22,7 +22,7 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/internal/connor" - "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/keys" "github.com/sourcenetwork/defradb/internal/planner/mapper" "github.com/ipfs/go-datastore/query" @@ -62,7 +62,7 @@ type indexIterator interface { } type indexIterResult struct { - key core.IndexDataStoreKey + key keys.IndexDataStoreKey foundKey bool value []byte } @@ -71,7 +71,7 @@ type indexIterResult struct { type indexPrefixIterator struct { indexDesc client.IndexDescription indexedFields []client.FieldDefinition - indexKey core.IndexDataStoreKey + indexKey keys.IndexDataStoreKey matchers []valueMatcher execInfo *ExecInfo resultIter query.Results @@ -114,7 +114,7 @@ func (iter *indexPrefixIterator) nextResult() (indexIterResult, error) { if !hasVal { return indexIterResult{}, nil } - key, err := core.DecodeIndexDataStoreKey([]byte(res.Key), &iter.indexDesc, iter.indexedFields) + key, err := keys.DecodeIndexDataStoreKey([]byte(res.Key), &iter.indexDesc, iter.indexedFields) if err != nil { return indexIterResult{}, err } @@ -151,7 +151,7 @@ func (iter *indexPrefixIterator) Close() error { } type eqSingleIndexIterator struct { - indexKey core.IndexDataStoreKey + indexKey keys.IndexDataStoreKey execInfo *ExecInfo ctx context.Context @@ -305,7 +305,7 @@ func (iter *arrayIndexIterator) Close() error { return iter.inner.Close() } -func executeValueMatchers(matchers []valueMatcher, fields []core.IndexedField) (bool, error) { +func executeValueMatchers(matchers []valueMatcher, fields []keys.IndexedField) (bool, error) { for i := range matchers { res, err := matchers[i].Match(fields[i].Value) if err != nil { @@ -576,7 +576,7 @@ func (f *IndexFetcher) newPrefixIteratorFromConditions( } func (f *IndexFetcher) newPrefixIterator( - indexKey core.IndexDataStoreKey, + 
indexKey keys.IndexDataStoreKey, matchers []valueMatcher, execInfo *ExecInfo, ) *indexPrefixIterator { @@ -624,7 +624,7 @@ func (f *IndexFetcher) newInIndexIterator( } } else { indexKey := f.newIndexDataStoreKey() - indexKey.Fields = []core.IndexedField{{Descending: f.indexDesc.Fields[0].Descending}} + indexKey.Fields = []keys.IndexedField{{Descending: f.indexDesc.Fields[0].Descending}} iter = f.newPrefixIterator(indexKey, matchers, &f.execInfo) } @@ -634,18 +634,18 @@ func (f *IndexFetcher) newInIndexIterator( }, nil } -func (f *IndexFetcher) newIndexDataStoreKey() core.IndexDataStoreKey { - key := core.IndexDataStoreKey{CollectionID: f.col.ID(), IndexID: f.indexDesc.ID} +func (f *IndexFetcher) newIndexDataStoreKey() keys.IndexDataStoreKey { + key := keys.IndexDataStoreKey{CollectionID: f.col.ID(), IndexID: f.indexDesc.ID} return key } -func (f *IndexFetcher) newIndexDataStoreKeyWithValues(values []client.NormalValue) core.IndexDataStoreKey { - fields := make([]core.IndexedField, len(values)) +func (f *IndexFetcher) newIndexDataStoreKeyWithValues(values []client.NormalValue) keys.IndexDataStoreKey { + fields := make([]keys.IndexedField, len(values)) for i := range values { fields[i].Value = values[i] fields[i].Descending = f.indexDesc.Fields[i].Descending } - return core.NewIndexDataStoreKey(f.col.ID(), f.indexDesc.ID, fields) + return keys.NewIndexDataStoreKey(f.col.ID(), f.indexDesc.ID, fields) } func (f *IndexFetcher) createIndexIterator() (indexIterator, error) { diff --git a/internal/db/fetcher/versioned.go b/internal/db/fetcher/versioned.go index 2660664bcd..508b0ea406 100644 --- a/internal/db/fetcher/versioned.go +++ b/internal/db/fetcher/versioned.go @@ -28,6 +28,7 @@ import ( "github.com/sourcenetwork/defradb/internal/core" coreblock "github.com/sourcenetwork/defradb/internal/core/block" "github.com/sourcenetwork/defradb/internal/db/base" + "github.com/sourcenetwork/defradb/internal/keys" merklecrdt "github.com/sourcenetwork/defradb/internal/merkle/crdt" 
"github.com/sourcenetwork/defradb/internal/planner/mapper" ) @@ -89,7 +90,7 @@ type VersionedFetcher struct { root datastore.Rootstore store datastore.Txn - dsKey core.DataStoreKey + dsKey keys.DataStoreKey version cid.Cid queuedCids *list.List @@ -392,7 +393,7 @@ func (vf *VersionedFetcher) processBlock( } mcrdt, err = merklecrdt.InstanceWithStore( vf.store, - core.CollectionSchemaVersionKey{}, + keys.CollectionSchemaVersionKey{}, ctype, kind, dsKey, @@ -427,7 +428,7 @@ func (vf *VersionedFetcher) Close() error { } // NewVersionedSpan creates a new VersionedSpan from a DataStoreKey and a version CID. -func NewVersionedSpan(dsKey core.DataStoreKey, version cid.Cid) core.Spans { +func NewVersionedSpan(dsKey keys.DataStoreKey, version cid.Cid) core.Spans { // Todo: Dont abuse DataStoreKey for version cid! - return core.NewSpans(core.NewSpan(dsKey, core.DataStoreKey{DocID: version.String()})) + return core.NewSpans(core.NewSpan(dsKey, keys.DataStoreKey{DocID: version.String()})) } diff --git a/internal/db/index.go b/internal/db/index.go index c3860dca5a..638f0b923b 100644 --- a/internal/db/index.go +++ b/internal/db/index.go @@ -16,7 +16,7 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/keys" "github.com/sourcenetwork/defradb/internal/utils/slice" ) @@ -121,28 +121,28 @@ func (index *collectionBaseIndex) getDocFieldValues(doc *client.Document) ([]cli func (index *collectionBaseIndex) getDocumentsIndexKey( doc *client.Document, appendDocID bool, -) (core.IndexDataStoreKey, error) { +) (keys.IndexDataStoreKey, error) { fieldValues, err := index.getDocFieldValues(doc) if err != nil { - return core.IndexDataStoreKey{}, err + return keys.IndexDataStoreKey{}, err } - fields := make([]core.IndexedField, len(index.fieldsDescs)) + fields := make([]keys.IndexedField, len(index.fieldsDescs)) 
for i := range index.fieldsDescs { fields[i].Value = fieldValues[i] fields[i].Descending = index.desc.Fields[i].Descending } if appendDocID { - fields = append(fields, core.IndexedField{Value: client.NewNormalString(doc.ID().String())}) + fields = append(fields, keys.IndexedField{Value: client.NewNormalString(doc.ID().String())}) } - return core.NewIndexDataStoreKey(index.collection.ID(), index.desc.ID, fields), nil + return keys.NewIndexDataStoreKey(index.collection.ID(), index.desc.ID, fields), nil } func (index *collectionBaseIndex) deleteIndexKey( ctx context.Context, txn datastore.Txn, - key core.IndexDataStoreKey, + key keys.IndexDataStoreKey, ) error { exists, err := txn.Datastore().Has(ctx, key.ToDS()) if err != nil { @@ -157,7 +157,7 @@ func (index *collectionBaseIndex) deleteIndexKey( // RemoveAll remove all artifacts of the index from the storage, i.e. all index // field values for all documents. func (index *collectionBaseIndex) RemoveAll(ctx context.Context, txn datastore.Txn) error { - prefixKey := core.IndexDataStoreKey{} + prefixKey := keys.IndexDataStoreKey{} prefixKey.CollectionID = index.collection.ID() prefixKey.IndexID = index.desc.ID @@ -196,7 +196,7 @@ var _ CollectionIndex = (*collectionSimpleIndex)(nil) func (index *collectionSimpleIndex) getDocumentsIndexKey( doc *client.Document, -) (core.IndexDataStoreKey, error) { +) (keys.IndexDataStoreKey, error) { // docID is appended, as it's part of the key for non-unique indexes return index.collectionBaseIndex.getDocumentsIndexKey(doc, true) } @@ -252,7 +252,7 @@ func (index *collectionSimpleIndex) deleteDocIndex( } // hasIndexKeyNilField returns true if the index key has a field with nil value -func hasIndexKeyNilField(key *core.IndexDataStoreKey) bool { +func hasIndexKeyNilField(key *keys.IndexDataStoreKey) bool { for i := range key.Fields { if key.Fields[i].Value.IsNil() { return true @@ -270,7 +270,7 @@ var _ CollectionIndex = (*collectionUniqueIndex)(nil) func (index *collectionUniqueIndex) 
save( ctx context.Context, txn datastore.Txn, - key *core.IndexDataStoreKey, + key *keys.IndexDataStoreKey, val []byte, ) error { err := txn.Datastore().Put(ctx, key.ToDS(), val) @@ -312,20 +312,20 @@ func newUniqueIndexError(doc *client.Document, fieldsDescs []client.SchemaFieldD func (index *collectionBaseIndex) getDocumentsUniqueIndexRecord( doc *client.Document, -) (core.IndexDataStoreKey, []byte, error) { +) (keys.IndexDataStoreKey, []byte, error) { key, err := index.getDocumentsIndexKey(doc, false) if err != nil { - return core.IndexDataStoreKey{}, nil, err + return keys.IndexDataStoreKey{}, nil, err } return makeUniqueKeyValueRecord(key, doc) } func makeUniqueKeyValueRecord( - key core.IndexDataStoreKey, + key keys.IndexDataStoreKey, doc *client.Document, -) (core.IndexDataStoreKey, []byte, error) { +) (keys.IndexDataStoreKey, []byte, error) { if hasIndexKeyNilField(&key) { - key.Fields = append(key.Fields, core.IndexedField{Value: client.NewNormalString(doc.ID().String())}) + key.Fields = append(key.Fields, keys.IndexedField{Value: client.NewNormalString(doc.ID().String())}) return key, []byte{}, nil } else { return key, []byte(doc.ID().String()), nil @@ -336,10 +336,10 @@ func (index *collectionUniqueIndex) prepareUniqueIndexRecordToStore( ctx context.Context, txn datastore.Txn, doc *client.Document, -) (core.IndexDataStoreKey, []byte, error) { +) (keys.IndexDataStoreKey, []byte, error) { key, val, err := index.getDocumentsUniqueIndexRecord(doc) if err != nil { - return core.IndexDataStoreKey{}, nil, err + return keys.IndexDataStoreKey{}, nil, err } return key, val, validateUniqueKeyValue(ctx, txn, key, val, doc, index.fieldsDescs) } @@ -347,7 +347,7 @@ func (index *collectionUniqueIndex) prepareUniqueIndexRecordToStore( func validateUniqueKeyValue( ctx context.Context, txn datastore.Txn, - key core.IndexDataStoreKey, + key keys.IndexDataStoreKey, val []byte, doc *client.Document, fieldsDescs []client.SchemaFieldDescription, @@ -455,7 +455,7 @@ func 
newCollectionArrayBaseIndex(base collectionBaseIndex) collectionArrayBaseIn func (index *collectionArrayBaseIndex) newIndexKeyGenerator( doc *client.Document, appendDocID bool, -) (func() (core.IndexDataStoreKey, bool), error) { +) (func() (keys.IndexDataStoreKey, bool), error) { key, err := index.getDocumentsIndexKey(doc, appendDocID) if err != nil { return nil, err @@ -478,15 +478,15 @@ func (index *collectionArrayBaseIndex) newIndexKeyGenerator( // This function generates the next key by iterating through all possible combinations. // It works pretty much like a digital clock that first iterates through seconds, then minutes, etc. - return func() (core.IndexDataStoreKey, bool) { + return func() (keys.IndexDataStoreKey, bool) { if done { - return core.IndexDataStoreKey{}, false + return keys.IndexDataStoreKey{}, false } - resultKey := core.IndexDataStoreKey{ + resultKey := keys.IndexDataStoreKey{ CollectionID: key.CollectionID, IndexID: key.IndexID, - Fields: make([]core.IndexedField, len(key.Fields)), + Fields: make([]keys.IndexedField, len(key.Fields)), } copy(resultKey.Fields, key.Fields) @@ -520,12 +520,12 @@ func (index *collectionArrayBaseIndex) newIndexKeyGenerator( func (index *collectionArrayBaseIndex) getAllKeys( doc *client.Document, appendDocID bool, -) ([]core.IndexDataStoreKey, error) { +) ([]keys.IndexDataStoreKey, error) { getNextOldKey, err := index.newIndexKeyGenerator(doc, appendDocID) if err != nil { return nil, err } - keys := make([]core.IndexDataStoreKey, 0) + keys := make([]keys.IndexDataStoreKey, 0) for { key, ok := getNextOldKey() if !ok { @@ -542,7 +542,7 @@ func (index *collectionArrayBaseIndex) deleteRetiredKeysAndReturnNew( oldDoc *client.Document, newDoc *client.Document, appendDocID bool, -) ([]core.IndexDataStoreKey, error) { +) ([]keys.IndexDataStoreKey, error) { prevKeys, err := index.getAllKeys(oldDoc, appendDocID) if err != nil { return nil, err @@ -553,7 +553,7 @@ func (index *collectionArrayBaseIndex) 
deleteRetiredKeysAndReturnNew( } for _, prevKey := range prevKeys { - keyEqual := func(key core.IndexDataStoreKey) bool { return prevKey.Equal(key) } + keyEqual := func(key keys.IndexDataStoreKey) bool { return prevKey.Equal(key) } rem, removedVal := slice.RemoveFirstIf(currentKeys, keyEqual) // If a previous keys is not among the current keys, it should be retired if !removedVal.HasValue() { @@ -683,7 +683,7 @@ func (index *collectionArrayUniqueIndex) addNewUniqueKey( ctx context.Context, txn datastore.Txn, doc *client.Document, - key core.IndexDataStoreKey, + key keys.IndexDataStoreKey, ) error { key, val, err := makeUniqueKeyValueRecord(key, doc) if err != nil { diff --git a/internal/db/index_test.go b/internal/db/index_test.go index 779bcdff84..950f41c47f 100644 --- a/internal/db/index_test.go +++ b/internal/db/index_test.go @@ -29,7 +29,7 @@ import ( "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/datastore/mocks" "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/keys" "github.com/sourcenetwork/defradb/internal/request/graphql/schema" ) @@ -244,7 +244,7 @@ func (f *indexTestFixture) dropIndex(colName, indexName string) error { } func (f *indexTestFixture) countIndexPrefixes(indexName string) int { - prefix := core.NewCollectionIndexKey(immutable.Some(f.users.ID()), indexName) + prefix := keys.NewCollectionIndexKey(immutable.Some(f.users.ID()), indexName) q, err := f.txn.Systemstore().Query(f.ctx, query.Query{ Prefix: prefix.ToString(), }) @@ -422,7 +422,7 @@ func TestCreateIndex_ShouldSaveToSystemStorage(t *testing.T) { _, err := f.createCollectionIndex(desc) assert.NoError(t, err) - key := core.NewCollectionIndexKey(immutable.Some(f.users.ID()), name) + key := keys.NewCollectionIndexKey(immutable.Some(f.users.ID()), name) data, err := f.txn.Systemstore().Get(f.ctx, key.ToDS()) assert.NoError(t, err) var deserialized client.IndexDescription @@ 
-474,7 +474,7 @@ func TestCreateIndex_WithMultipleCollectionsAndIndexes_AssignIncrementedIDPerCol desc, err := f.createCollectionIndexFor(col.Name().Value(), makeIndex(fieldName)) require.NoError(t, err) assert.Equal(t, expectedID, desc.ID) - seqKey := core.NewIndexIDSequenceKey(col.ID()) + seqKey := keys.NewIndexIDSequenceKey(col.ID()) storedSeqKey, err := f.txn.Systemstore().Get(f.ctx, seqKey.ToDS()) assert.NoError(t, err) storedSeqVal := binary.BigEndian.Uint64(storedSeqKey) @@ -563,7 +563,7 @@ func TestGetIndexes_IfInvalidIndexIsStored_ReturnError(t *testing.T) { f := newIndexTestFixture(t) defer f.db.Close() - indexKey := core.NewCollectionIndexKey(immutable.Some(f.users.ID()), "users_name_index") + indexKey := keys.NewCollectionIndexKey(immutable.Some(f.users.ID()), "users_name_index") err := f.txn.Systemstore().Put(f.ctx, indexKey.ToDS(), []byte("invalid")) assert.NoError(t, err) @@ -575,7 +575,7 @@ func TestGetIndexes_IfInvalidIndexKeyIsStored_ReturnError(t *testing.T) { f := newIndexTestFixture(t) defer f.db.Close() - indexKey := core.NewCollectionIndexKey(immutable.Some(f.users.ID()), "users_name_index") + indexKey := keys.NewCollectionIndexKey(immutable.Some(f.users.ID()), "users_name_index") key := ds.NewKey(indexKey.ToString() + "/invalid") desc := client.IndexDescription{ Name: "some_index_name", @@ -741,7 +741,7 @@ func TestGetCollectionIndexes_IfInvalidIndexIsStored_ReturnError(t *testing.T) { f := newIndexTestFixture(t) defer f.db.Close() - indexKey := core.NewCollectionIndexKey(immutable.Some(f.users.ID()), "users_name_index") + indexKey := keys.NewCollectionIndexKey(immutable.Some(f.users.ID()), "users_name_index") err := f.txn.Systemstore().Put(f.ctx, indexKey.ToDS(), []byte("invalid")) assert.NoError(t, err) @@ -998,7 +998,7 @@ func TestDropIndex_ShouldDeleteIndex(t *testing.T) { err := f.dropIndex(usersColName, desc.Name) assert.NoError(t, err) - indexKey := core.NewCollectionIndexKey(immutable.Some(f.users.ID()), desc.Name) + indexKey := 
keys.NewCollectionIndexKey(immutable.Some(f.users.ID()), desc.Name) _, err = f.txn.Systemstore().Get(f.ctx, indexKey.ToDS()) assert.Error(t, err) } diff --git a/internal/db/indexed_docs_test.go b/internal/db/indexed_docs_test.go index 4cd591a536..2c6ce0af53 100644 --- a/internal/db/indexed_docs_test.go +++ b/internal/db/indexed_docs_test.go @@ -31,6 +31,7 @@ import ( "github.com/sourcenetwork/defradb/internal/core" "github.com/sourcenetwork/defradb/internal/db/fetcher" fetcherMocks "github.com/sourcenetwork/defradb/internal/db/fetcher/mocks" + "github.com/sourcenetwork/defradb/internal/keys" "github.com/sourcenetwork/defradb/internal/planner/mapper" ) @@ -154,8 +155,8 @@ func (b *indexKeyBuilder) Unique() *indexKeyBuilder { return b } -func (b *indexKeyBuilder) Build() core.IndexDataStoreKey { - key := core.IndexDataStoreKey{} +func (b *indexKeyBuilder) Build() keys.IndexDataStoreKey { + key := keys.IndexDataStoreKey{} if b.colName == "" { return key @@ -238,11 +239,11 @@ indexLoop: if i < len(b.descendingFields) { descending = b.descendingFields[i] } - key.Fields = append(key.Fields, core.IndexedField{Value: val, Descending: descending}) + key.Fields = append(key.Fields, keys.IndexedField{Value: val, Descending: descending}) } if !b.isUnique || hasNilValue { - key.Fields = append(key.Fields, core.IndexedField{Value: client.NewNormalString(b.doc.ID().String())}) + key.Fields = append(key.Fields, keys.IndexedField{Value: client.NewNormalString(b.doc.ID().String())}) } } @@ -287,7 +288,7 @@ func (f *indexTestFixture) stubSystemStore(systemStoreOn *mocks.DSReaderWriter_E indexOnNameDescData, err := json.Marshal(desc) require.NoError(f.t, err) - colIndexKey := core.NewCollectionIndexKey(immutable.Some(f.users.ID()), "") + colIndexKey := keys.NewCollectionIndexKey(immutable.Some(f.users.ID()), "") matchPrefixFunc := func(q query.Query) bool { return q.Prefix == colIndexKey.ToDS().String() } @@ -301,11 +302,11 @@ func (f *indexTestFixture) stubSystemStore(systemStoreOn 
*mocks.DSReaderWriter_E systemStoreOn.Query(mock.Anything, mock.Anything).Maybe(). Return(mocks.NewQueryResultsWithValues(f.t), nil) - colIndexOnNameKey := core.NewCollectionIndexKey(immutable.Some(f.users.ID()), testUsersColIndexName) + colIndexOnNameKey := keys.NewCollectionIndexKey(immutable.Some(f.users.ID()), testUsersColIndexName) systemStoreOn.Get(mock.Anything, colIndexOnNameKey.ToDS()).Maybe().Return(indexOnNameDescData, nil) if f.users != nil { - sequenceKey := core.NewIndexIDSequenceKey(f.users.ID()) + sequenceKey := keys.NewIndexIDSequenceKey(f.users.ID()) systemStoreOn.Get(mock.Anything, sequenceKey.ToDS()).Maybe().Return([]byte{0, 0, 0, 0, 0, 0, 0, 1}, nil) } @@ -684,7 +685,7 @@ func TestNonUniqueCreate_IfDatastoreFailsToStoreIndex_ReturnError(t *testing.T) doc := f.newUserDoc("John", 21, f.users) f.saveDocToCollection(doc, f.users) - fieldKeyString := core.DataStoreKey{ + fieldKeyString := keys.DataStoreKey{ CollectionRootID: f.users.Description().RootID, }.WithDocID(doc.ID().String()). WithFieldID("1"). 
diff --git a/internal/db/lens.go b/internal/db/lens.go index bf0c9ce03a..0ad3d55994 100644 --- a/internal/db/lens.go +++ b/internal/db/lens.go @@ -18,8 +18,8 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/internal/core" "github.com/sourcenetwork/defradb/internal/db/description" + "github.com/sourcenetwork/defradb/internal/keys" ) func (db *db) setMigration(ctx context.Context, cfg client.LensConfig) error { @@ -35,7 +35,7 @@ func (db *db) setMigration(ctx context.Context, cfg client.LensConfig) error { return err } - colSeq, err := db.getSequence(ctx, core.CollectionIDSequenceKey{}) + colSeq, err := db.getSequence(ctx, keys.CollectionIDSequenceKey{}) if err != nil { return err } @@ -125,7 +125,7 @@ func (db *db) setMigration(ctx context.Context, cfg client.LensConfig) error { } if schemaFound { - schemaRootKey := core.NewSchemaRootKey(schema.Root, cfg.DestinationSchemaVersionID) + schemaRootKey := keys.NewSchemaRootKey(schema.Root, cfg.DestinationSchemaVersionID) err = txn.Systemstore().Put(ctx, schemaRootKey.ToDS(), []byte{}) if err != nil { return err diff --git a/internal/db/merge.go b/internal/db/merge.go index 8b08b333e4..d1b96d5b77 100644 --- a/internal/db/merge.go +++ b/internal/db/merge.go @@ -30,6 +30,7 @@ import ( coreblock "github.com/sourcenetwork/defradb/internal/core/block" "github.com/sourcenetwork/defradb/internal/db/base" "github.com/sourcenetwork/defradb/internal/encryption" + "github.com/sourcenetwork/defradb/internal/keys" "github.com/sourcenetwork/defradb/internal/merkle/clock" merklecrdt "github.com/sourcenetwork/defradb/internal/merkle/crdt" ) @@ -133,7 +134,7 @@ type mergeProcessor struct { encBlockLS linking.LinkSystem mCRDTs map[string]merklecrdt.MerkleCRDT col *collection - dsKey core.DataStoreKey + dsKey keys.DataStoreKey // composites is a list of composites that need to be merged. 
composites *list.List // missingEncryptionBlocks is a list of blocks that we failed to fetch @@ -145,7 +146,7 @@ type mergeProcessor struct { func (db *db) newMergeProcessor( txn datastore.Txn, col *collection, - dsKey core.DataStoreKey, + dsKey keys.DataStoreKey, ) (*mergeProcessor, error) { blockLS := cidlink.DefaultLinkSystem() blockLS.SetReadStorage(txn.Blockstore().AsIPLDStorage()) @@ -433,7 +434,7 @@ func (mp *mergeProcessor) initCRDTForType(field string) (merklecrdt.MerkleCRDT, return mcrdt, nil } - schemaVersionKey := core.CollectionSchemaVersionKey{ + schemaVersionKey := keys.CollectionSchemaVersionKey{ SchemaVersionID: mp.col.Schema().VersionID, CollectionID: mp.col.ID(), } @@ -490,7 +491,7 @@ func getCollectionFromRootSchema(ctx context.Context, db *db, rootSchema string) // getHeadsAsMergeTarget retrieves the heads of the composite DAG for the given document // and returns them as a merge target. -func getHeadsAsMergeTarget(ctx context.Context, txn datastore.Txn, dsKey core.DataStoreKey) (mergeTarget, error) { +func getHeadsAsMergeTarget(ctx context.Context, txn datastore.Txn, dsKey keys.DataStoreKey) (mergeTarget, error) { cids, err := getHeads(ctx, txn, dsKey) if err != nil { @@ -512,7 +513,7 @@ func getHeadsAsMergeTarget(ctx context.Context, txn datastore.Txn, dsKey core.Da } // getHeads retrieves the heads associated with the given datastore key. 
-func getHeads(ctx context.Context, txn datastore.Txn, dsKey core.DataStoreKey) ([]cid.Cid, error) { +func getHeads(ctx context.Context, txn datastore.Txn, dsKey keys.DataStoreKey) ([]cid.Cid, error) { headset := clock.NewHeadSet(txn.Headstore(), dsKey.ToHeadStoreKey()) cids, _, err := headset.List(ctx) diff --git a/internal/db/p2p_replicator.go b/internal/db/p2p_replicator.go index 7764a6dec9..61c082d210 100644 --- a/internal/db/p2p_replicator.go +++ b/internal/db/p2p_replicator.go @@ -27,6 +27,7 @@ import ( "github.com/sourcenetwork/defradb/event" "github.com/sourcenetwork/defradb/internal/core" coreblock "github.com/sourcenetwork/defradb/internal/core/block" + "github.com/sourcenetwork/defradb/internal/keys" "github.com/sourcenetwork/defradb/internal/merkle/clock" ) @@ -61,7 +62,7 @@ func (db *db) SetReplicator(ctx context.Context, rep client.ReplicatorParams) er storedRep := client.Replicator{} storedSchemas := make(map[string]struct{}) - repKey := core.NewReplicatorKey(rep.Info.ID.String()) + repKey := keys.NewReplicatorKey(rep.Info.ID.String()) hasOldRep, err := txn.Peerstore().Has(ctx, repKey.ToDS()) if err != nil { return err @@ -171,7 +172,7 @@ func (db *db) getDocsHeads( log.ErrorContextE(ctx, "Key channel error", docIDResult.Err) continue } - docID := core.DataStoreKeyFromDocID(docIDResult.ID) + docID := keys.DataStoreKeyFromDocID(docIDResult.ID) headset := clock.NewHeadSet( txn.Headstore(), docID.WithFieldID(core.COMPOSITE_NAMESPACE).ToHeadStoreKey(), @@ -226,7 +227,7 @@ func (db *db) DeleteReplicator(ctx context.Context, rep client.ReplicatorParams) storedRep := client.Replicator{} storedSchemas := make(map[string]struct{}) - repKey := core.NewReplicatorKey(rep.Info.ID.String()) + repKey := keys.NewReplicatorKey(rep.Info.ID.String()) hasOldRep, err := txn.Peerstore().Has(ctx, repKey.ToDS()) if err != nil { return err @@ -257,7 +258,7 @@ func (db *db) DeleteReplicator(ctx context.Context, rep client.ReplicatorParams) collections = append(collections, 
col) } // make sure the replicator exists in the datastore - key := core.NewReplicatorKey(rep.Info.ID.String()) + key := keys.NewReplicatorKey(rep.Info.ID.String()) _, err = txn.Peerstore().Get(ctx, key.ToDS()) if err != nil { return err @@ -276,7 +277,7 @@ func (db *db) DeleteReplicator(ctx context.Context, rep client.ReplicatorParams) } // Persist the replicator to the store, deleting it if no schemas remain - key := core.NewReplicatorKey(rep.Info.ID.String()) + key := keys.NewReplicatorKey(rep.Info.ID.String()) if len(rep.Collections) == 0 { err := txn.Peerstore().Delete(ctx, key.ToDS()) if err != nil { @@ -312,7 +313,7 @@ func (db *db) GetAllReplicators(ctx context.Context) ([]client.Replicator, error // create collection system prefix query query := query.Query{ - Prefix: core.NewReplicatorKey("").ToString(), + Prefix: keys.NewReplicatorKey("").ToString(), } results, err := txn.Peerstore().Query(ctx, query) if err != nil { @@ -376,7 +377,7 @@ func (db *db) handleReplicatorFailure(ctx context.Context, peerID, docID string) if err != nil { return err } - docIDKey := core.NewReplicatorRetryDocIDKey(peerID, docID) + docIDKey := keys.NewReplicatorRetryDocIDKey(peerID, docID) err = txn.Peerstore().Put(ctx, docIDKey.ToDS(), []byte{}) if err != nil { return err @@ -424,7 +425,7 @@ func updateReplicatorStatus( peerID string, active bool, ) error { - key := core.NewReplicatorKey(peerID) + key := keys.NewReplicatorKey(peerID) repBytes, err := txn.Peerstore().Get(ctx, key.ToDS()) if err != nil { return err @@ -465,7 +466,7 @@ func createIfNotExistsReplicatorRetry( peerID string, retryIntervals []time.Duration, ) error { - key := core.NewReplicatorRetryIDKey(peerID) + key := keys.NewReplicatorRetryIDKey(peerID) exists, err := txn.Peerstore().Has(ctx, key.ToDS()) if err != nil { return err @@ -490,7 +491,7 @@ func createIfNotExistsReplicatorRetry( func (db *db) retryReplicators(ctx context.Context) { q := query.Query{ - Prefix: core.REPLICATOR_RETRY_ID, + Prefix: 
keys.REPLICATOR_RETRY_ID, } results, err := db.Peerstore().Query(ctx, q) if err != nil { @@ -500,7 +501,7 @@ func (db *db) retryReplicators(ctx context.Context) { defer closeQueryResults(results) now := time.Now() for result := range results.Next() { - key, err := core.NewReplicatorRetryIDKeyFromString(result.Key) + key, err := keys.NewReplicatorRetryIDKeyFromString(result.Key) if err != nil { log.ErrorContextE(ctx, "Failed to parse replicator retry ID key", err) continue @@ -520,7 +521,7 @@ func (db *db) retryReplicators(ctx context.Context) { if now.After(rInfo.NextRetry) && !rInfo.Retrying { // The replicator might have been deleted by the time we reach this point. // If it no longer exists, we delete the retry key and all retry docs. - exists, err := db.Peerstore().Has(ctx, core.NewReplicatorKey(key.PeerID).ToDS()) + exists, err := db.Peerstore().Has(ctx, keys.NewReplicatorKey(key.PeerID).ToDS()) if err != nil { log.ErrorContextE(ctx, "Failed to check if replicator exists", err) continue @@ -543,7 +544,7 @@ func (db *db) retryReplicators(ctx context.Context) { } } -func (db *db) setReplicatorAsRetrying(ctx context.Context, key core.ReplicatorRetryIDKey, rInfo retryInfo) error { +func (db *db) setReplicatorAsRetrying(ctx context.Context, key keys.ReplicatorRetryIDKey, rInfo retryInfo) error { rInfo.Retrying = true rInfo.NumRetries++ b, err := cbor.Marshal(rInfo) @@ -559,7 +560,7 @@ func setReplicatorNextRetry( peerID string, retryIntervals []time.Duration, ) error { - key := core.NewReplicatorRetryIDKey(peerID) + key := keys.NewReplicatorRetryIDKey(peerID) b, err := txn.Peerstore().Get(ctx, key.ToDS()) if err != nil { return err @@ -597,7 +598,7 @@ func setReplicatorNextRetry( // would be a high chance of unnecessary transaction conflicts. 
func (db *db) retryReplicator(ctx context.Context, peerID string) { log.InfoContext(ctx, "Retrying replicator", corelog.String("PeerID", peerID)) - key := core.NewReplicatorRetryDocIDKey(peerID, "") + key := keys.NewReplicatorRetryDocIDKey(peerID, "") q := query.Query{ Prefix: key.ToString(), } @@ -613,7 +614,7 @@ func (db *db) retryReplicator(ctx context.Context, peerID string) { return default: } - key, err := core.NewReplicatorRetryDocIDKeyFromString(result.Key) + key, err := keys.NewReplicatorRetryDocIDKeyFromString(result.Key) if err != nil { log.ErrorContextE(ctx, "Failed to parse retry doc key", err) continue @@ -645,7 +646,7 @@ func (db *db) retryDoc(ctx context.Context, docID string) error { return err } defer txn.Discard(ctx) - headStoreKey := core.HeadStoreKey{ + headStoreKey := keys.HeadStoreKey{ DocID: docID, FieldID: core.COMPOSITE_NAMESPACE, } @@ -706,7 +707,7 @@ func deleteReplicatorRetryIfNoMoreDocs( txn datastore.Txn, peerID string, ) (bool, error) { - key := core.NewReplicatorRetryDocIDKey(peerID, "") + key := keys.NewReplicatorRetryDocIDKey(peerID, "") q := query.Query{ Prefix: key.ToString(), KeysOnly: true, @@ -721,7 +722,7 @@ func deleteReplicatorRetryIfNoMoreDocs( return false, err } if len(entries) == 0 { - key := core.NewReplicatorRetryIDKey(peerID) + key := keys.NewReplicatorRetryIDKey(peerID) return true, txn.Peerstore().Delete(ctx, key.ToDS()) } return false, nil @@ -729,12 +730,12 @@ func deleteReplicatorRetryIfNoMoreDocs( // deleteReplicatorRetryAndDocs deletes the replicator retry and all retry docs. 
func (db *db) deleteReplicatorRetryAndDocs(ctx context.Context, peerID string) error { - key := core.NewReplicatorRetryIDKey(peerID) + key := keys.NewReplicatorRetryIDKey(peerID) err := db.Peerstore().Delete(ctx, key.ToDS()) if err != nil { return err } - docKey := core.NewReplicatorRetryDocIDKey(peerID, "") + docKey := keys.NewReplicatorRetryDocIDKey(peerID, "") q := query.Query{ Prefix: docKey.ToString(), KeysOnly: true, @@ -745,7 +746,7 @@ func (db *db) deleteReplicatorRetryAndDocs(ctx context.Context, peerID string) e } defer closeQueryResults(results) for result := range results.Next() { - err = db.Peerstore().Delete(ctx, core.NewReplicatorRetryDocIDKey(peerID, result.Key).ToDS()) + err = db.Peerstore().Delete(ctx, keys.NewReplicatorRetryDocIDKey(peerID, result.Key).ToDS()) if err != nil { return err } diff --git a/internal/db/p2p_schema_root.go b/internal/db/p2p_schema_root.go index 6f85ea682b..2df2dcc931 100644 --- a/internal/db/p2p_schema_root.go +++ b/internal/db/p2p_schema_root.go @@ -19,7 +19,7 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/event" - "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/keys" ) const marker = byte(0xff) @@ -64,7 +64,7 @@ func (db *db) AddP2PCollections(ctx context.Context, collectionIDs []string) err // Ensure we can add all the collections to the store on the transaction // before adding to topics. for _, col := range storeCollections { - key := core.NewP2PCollectionKey(col.SchemaRoot()) + key := keys.NewP2PCollectionKey(col.SchemaRoot()) err = txn.Systemstore().Put(ctx, key.ToDS(), []byte{marker}) if err != nil { return err @@ -121,7 +121,7 @@ func (db *db) RemoveP2PCollections(ctx context.Context, collectionIDs []string) // Ensure we can remove all the collections to the store on the transaction // before adding to topics. 
for _, col := range storeCollections { - key := core.NewP2PCollectionKey(col.SchemaRoot()) + key := keys.NewP2PCollectionKey(col.SchemaRoot()) err = txn.Systemstore().Delete(ctx, key.ToDS()) if err != nil { return err @@ -154,7 +154,7 @@ func (db *db) GetAllP2PCollections(ctx context.Context) ([]string, error) { defer txn.Discard(ctx) query := dsq.Query{ - Prefix: core.NewP2PCollectionKey("").ToString(), + Prefix: keys.NewP2PCollectionKey("").ToString(), } results, err := txn.Systemstore().Query(ctx, query) if err != nil { @@ -163,7 +163,7 @@ func (db *db) GetAllP2PCollections(ctx context.Context) ([]string, error) { collectionIDs := []string{} for result := range results.Next() { - key, err := core.NewP2PCollectionKeyFromString(result.Key) + key, err := keys.NewP2PCollectionKeyFromString(result.Key) if err != nil { return nil, err } diff --git a/internal/db/sequence.go b/internal/db/sequence.go index 06c2989b99..8a9facaa63 100644 --- a/internal/db/sequence.go +++ b/internal/db/sequence.go @@ -17,15 +17,15 @@ import ( ds "github.com/ipfs/go-datastore" "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/keys" ) type sequence struct { - key core.Key + key keys.Key val uint64 } -func (db *db) getSequence(ctx context.Context, key core.Key) (*sequence, error) { +func (db *db) getSequence(ctx context.Context, key keys.Key) (*sequence, error) { seq := &sequence{ key: key, val: uint64(0), diff --git a/internal/db/view.go b/internal/db/view.go index 9c1e5eaafd..e9a4d0d31c 100644 --- a/internal/db/view.go +++ b/internal/db/view.go @@ -25,6 +25,7 @@ import ( "github.com/sourcenetwork/defradb/client/request" "github.com/sourcenetwork/defradb/internal/core" "github.com/sourcenetwork/defradb/internal/db/description" + "github.com/sourcenetwork/defradb/internal/keys" "github.com/sourcenetwork/defradb/internal/planner" ) @@ -210,7 +211,7 @@ func (db *db) buildViewCache(ctx context.Context, col 
client.CollectionDefinitio return err } - itemKey := core.NewViewCacheKey(col.Description.RootID, itemID) + itemKey := keys.NewViewCacheKey(col.Description.RootID, itemID) err = txn.Datastore().Put(ctx, itemKey.ToDS(), serializedItem) if err != nil { return err @@ -227,7 +228,7 @@ func (db *db) buildViewCache(ctx context.Context, col client.CollectionDefinitio func (db *db) clearViewCache(ctx context.Context, col client.CollectionDefinition) error { txn := mustGetContextTxn(ctx) - prefix := core.NewViewCacheColPrefix(col.Description.RootID) + prefix := keys.NewViewCacheColPrefix(col.Description.RootID) q, err := txn.Datastore().Query(ctx, query.Query{ Prefix: prefix.ToString(), diff --git a/internal/keys/datastore.go b/internal/keys/datastore.go new file mode 100644 index 0000000000..7caaf64e94 --- /dev/null +++ b/internal/keys/datastore.go @@ -0,0 +1,15 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package keys + +const ( + COLLECTION_VIEW_ITEMS = "/collection/vi" +) diff --git a/internal/keys/datastore_doc.go b/internal/keys/datastore_doc.go new file mode 100644 index 0000000000..1665fb7ea3 --- /dev/null +++ b/internal/keys/datastore_doc.go @@ -0,0 +1,292 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package keys + +import ( + "strconv" + + ds "github.com/ipfs/go-datastore" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/internal/encoding" +) + +// InstanceType is a type that represents the type of instance. +type InstanceType string + +const ( + // ValueKey is a type that represents a value instance. + ValueKey = InstanceType("v") + // PriorityKey is a type that represents a priority instance. + PriorityKey = InstanceType("p") + // DeletedKey is a type that represents a deleted document. + DeletedKey = InstanceType("d") + + DATASTORE_DOC_VERSION_FIELD_ID = "v" +) + +// DataStoreKey is a type that represents a key in the database. +type DataStoreKey struct { + CollectionRootID uint32 + InstanceType InstanceType + DocID string + FieldID string +} + +var _ Key = (*DataStoreKey)(nil) + +// Creates a new DataStoreKey from a string as best as it can, +// splitting the input using '/' as a field deliminator. It assumes +// that the input string is in the following format: +// +// /[CollectionRootId]/[InstanceType]/[DocID]/[FieldId] +// +// Any properties before the above (assuming a '/' deliminator) are ignored +func NewDataStoreKey(key string) (DataStoreKey, error) { + return DecodeDataStoreKey([]byte(key)) +} + +func MustNewDataStoreKey(key string) DataStoreKey { + dsKey, err := NewDataStoreKey(key) + if err != nil { + panic(err) + } + return dsKey +} + +func DataStoreKeyFromDocID(docID client.DocID) DataStoreKey { + return DataStoreKey{ + DocID: docID.String(), + } +} + +func (k DataStoreKey) WithValueFlag() DataStoreKey { + newKey := k + newKey.InstanceType = ValueKey + return newKey +} + +func (k DataStoreKey) WithPriorityFlag() DataStoreKey { + newKey := k + newKey.InstanceType = PriorityKey + return newKey +} + +func (k DataStoreKey) WithDeletedFlag() DataStoreKey { + newKey := k + newKey.InstanceType = DeletedKey + return newKey +} + +func (k DataStoreKey) WithDocID(docID string) DataStoreKey { + newKey := k + newKey.DocID 
= docID + return newKey +} + +func (k DataStoreKey) WithInstanceInfo(key DataStoreKey) DataStoreKey { + newKey := k + newKey.DocID = key.DocID + newKey.FieldID = key.FieldID + newKey.InstanceType = key.InstanceType + return newKey +} + +func (k DataStoreKey) WithFieldID(fieldID string) DataStoreKey { + newKey := k + newKey.FieldID = fieldID + return newKey +} + +func (k DataStoreKey) ToHeadStoreKey() HeadStoreKey { + return HeadStoreKey{ + DocID: k.DocID, + FieldID: k.FieldID, + } +} + +func (k DataStoreKey) ToString() string { + return string(k.Bytes()) +} + +func (k DataStoreKey) Bytes() []byte { + return EncodeDataStoreKey(&k) +} + +func (k DataStoreKey) ToDS() ds.Key { + return ds.NewKey(k.ToString()) +} + +func (k DataStoreKey) PrettyPrint() string { + var result string + + if k.CollectionRootID != 0 { + result = result + "/" + strconv.Itoa(int(k.CollectionRootID)) + } + if k.InstanceType != "" { + result = result + "/" + string(k.InstanceType) + } + if k.DocID != "" { + result = result + "/" + k.DocID + } + if k.FieldID != "" { + result = result + "/" + k.FieldID + } + + return result +} + +func (k DataStoreKey) Equal(other DataStoreKey) bool { + return k.CollectionRootID == other.CollectionRootID && + k.DocID == other.DocID && + k.FieldID == other.FieldID && + k.InstanceType == other.InstanceType +} + +func (k DataStoreKey) ToPrimaryDataStoreKey() PrimaryDataStoreKey { + return PrimaryDataStoreKey{ + CollectionRootID: k.CollectionRootID, + DocID: k.DocID, + } +} + +// PrefixEnd determines the end key given key as a prefix, that is the key that sorts precisely +// behind all keys starting with prefix: "1" is added to the final byte and the carry propagated. +// The special cases of nil and KeyMin always returns KeyMax. 
+func (k DataStoreKey) PrefixEnd() DataStoreKey { + newKey := k + + if k.FieldID != "" { + newKey.FieldID = string(bytesPrefixEnd([]byte(k.FieldID))) + return newKey + } + if k.DocID != "" { + newKey.DocID = string(bytesPrefixEnd([]byte(k.DocID))) + return newKey + } + if k.InstanceType != "" { + newKey.InstanceType = InstanceType(bytesPrefixEnd([]byte(k.InstanceType))) + return newKey + } + if k.CollectionRootID != 0 { + newKey.CollectionRootID = k.CollectionRootID + 1 + return newKey + } + + return newKey +} + +// FieldIDAsUint extracts the Field Identifier from the Key. +// In a Primary index, the last key path is the FieldIDAsUint. +// This may be different in Secondary Indexes. +// An error is returned if it can't correct convert the field to a uint32. +func (k DataStoreKey) FieldIDAsUint() (uint32, error) { + fieldID, err := strconv.Atoi(k.FieldID) + if err != nil { + return 0, NewErrFailedToGetFieldIdOfKey(err) + } + return uint32(fieldID), nil +} + +func bytesPrefixEnd(b []byte) []byte { + end := make([]byte, len(b)) + copy(end, b) + for i := len(end) - 1; i >= 0; i-- { + end[i] = end[i] + 1 + if end[i] != 0 { + return end[:i+1] + } + } + // This statement will only be reached if the key is already a + // maximal byte string (i.e. already \xff...). + return b +} + +// DecodeDataStoreKey decodes a store key into a [DataStoreKey]. 
+func DecodeDataStoreKey(data []byte) (DataStoreKey, error) { + if len(data) == 0 { + return DataStoreKey{}, ErrEmptyKey + } + + if data[0] != '/' { + return DataStoreKey{}, ErrInvalidKey + } + data = data[1:] + + data, colRootID, err := encoding.DecodeUvarintAscending(data) + if err != nil { + return DataStoreKey{}, err + } + + var instanceType InstanceType + if len(data) > 1 { + if data[0] == '/' { + data = data[1:] + } + instanceType = InstanceType(data[0]) + data = data[1:] + } + + const docKeyLength int = 40 + var docID string + if len(data) > docKeyLength { + if data[0] == '/' { + data = data[1:] + } + docID = string(data[:docKeyLength]) + data = data[docKeyLength:] + } + + var fieldID string + if len(data) > 1 { + if data[0] == '/' { + data = data[1:] + } + // Todo: This should be encoded/decoded properly in + // https://github.com/sourcenetwork/defradb/issues/2818 + fieldID = string(data) + } + + return DataStoreKey{ + CollectionRootID: uint32(colRootID), + InstanceType: (instanceType), + DocID: docID, + FieldID: fieldID, + }, nil +} + +// EncodeDataStoreKey encodes a [*DataStoreKey] to a byte array suitable for sorting in the store. +func EncodeDataStoreKey(key *DataStoreKey) []byte { + var result []byte + + if key.CollectionRootID != 0 { + result = encoding.EncodeUvarintAscending([]byte{'/'}, uint64(key.CollectionRootID)) + } + + if key.InstanceType != "" { + result = append(result, '/') + result = append(result, []byte(string(key.InstanceType))...) + } + + if key.DocID != "" { + result = append(result, '/') + result = append(result, []byte(key.DocID)...) + } + + if key.FieldID != "" { + result = append(result, '/') + // Todo: This should be encoded/decoded properly in + // https://github.com/sourcenetwork/defradb/issues/2818 + result = append(result, []byte(key.FieldID)...) 
+	}
+
+	return result
+}
diff --git a/internal/keys/datastore_index.go b/internal/keys/datastore_index.go
new file mode 100644
index 0000000000..83ed77b364
--- /dev/null
+++ b/internal/keys/datastore_index.go
@@ -0,0 +1,191 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package keys
+
+import (
+	ds "github.com/ipfs/go-datastore"
+
+	"github.com/sourcenetwork/defradb/client"
+	"github.com/sourcenetwork/defradb/internal/encoding"
+)
+
+// IndexedField contains information necessary for storing a single
+// value of a field in an index.
+type IndexedField struct {
+	// Value is the value of the field in the index
+	Value client.NormalValue
+	// Descending is true if the field is sorted in descending order
+	Descending bool
+}
+
+// IndexDataStoreKey is key of an indexed document in the database.
+type IndexDataStoreKey struct {
+	// CollectionID is the id of the collection
+	CollectionID uint32
+	// IndexID is the id of the index
+	IndexID uint32
+	// Fields is the values of the fields in the index
+	Fields []IndexedField
+}
+
+var _ Key = (*IndexDataStoreKey)(nil)
+
+// NewIndexDataStoreKey creates a new IndexDataStoreKey from a collection ID, index ID and fields.
+// It does not validate the values of the fields.
+func NewIndexDataStoreKey(collectionID, indexID uint32, fields []IndexedField) IndexDataStoreKey { + return IndexDataStoreKey{ + CollectionID: collectionID, + IndexID: indexID, + Fields: fields, + } +} + +// Bytes returns the byte representation of the key +func (k *IndexDataStoreKey) Bytes() []byte { + return EncodeIndexDataStoreKey(k) +} + +// ToDS returns the datastore key +func (k *IndexDataStoreKey) ToDS() ds.Key { + return ds.NewKey(k.ToString()) +} + +// ToString returns the string representation of the key +// It is in the following format: +// /[CollectionID]/[IndexID]/[FieldValue](/[FieldValue]...) +// If while composing the string from left to right, a component +// is empty, the string is returned up to that point +func (k *IndexDataStoreKey) ToString() string { + return string(k.Bytes()) +} + +// Equal returns true if the two keys are equal +func (k *IndexDataStoreKey) Equal(other IndexDataStoreKey) bool { + if k.CollectionID != other.CollectionID || k.IndexID != other.IndexID { + return false + } + + if len(k.Fields) != len(other.Fields) { + return false + } + + for i, field := range k.Fields { + if !field.Value.Equal(other.Fields[i].Value) || field.Descending != other.Fields[i].Descending { + return false + } + } + + return true +} + +// DecodeIndexDataStoreKey decodes a IndexDataStoreKey from bytes. +// It expects the input bytes is in the following format: +// +// /[CollectionID]/[IndexID]/[FieldValue](/[FieldValue]...) +// +// Where [CollectionID] and [IndexID] are integers +// +// All values of the fields are converted to standardized Defra Go type +// according to fields description. 
+func DecodeIndexDataStoreKey(
+	data []byte,
+	indexDesc *client.IndexDescription,
+	fields []client.FieldDefinition,
+) (IndexDataStoreKey, error) {
+	if len(data) == 0 {
+		return IndexDataStoreKey{}, ErrEmptyKey
+	}
+
+	if data[0] != '/' {
+		return IndexDataStoreKey{}, ErrInvalidKey
+	}
+	data = data[1:]
+
+	data, colID, err := encoding.DecodeUvarintAscending(data)
+	if err != nil {
+		return IndexDataStoreKey{}, err
+	}
+
+	key := IndexDataStoreKey{CollectionID: uint32(colID)}
+
+	if len(data) == 0 || data[0] != '/' {
+		return IndexDataStoreKey{}, ErrInvalidKey
+	}
+	data = data[1:]
+
+	data, indID, err := encoding.DecodeUvarintAscending(data)
+	if err != nil {
+		return IndexDataStoreKey{}, err
+	}
+	key.IndexID = uint32(indID)
+
+	if len(data) == 0 {
+		return key, nil
+	}
+
+	for len(data) > 0 {
+		if data[0] != '/' {
+			return IndexDataStoreKey{}, ErrInvalidKey
+		}
+		data = data[1:]
+
+		i := len(key.Fields)
+		descending := false
+		var kind client.FieldKind = client.FieldKind_DocID
+		// If the key has more values encoded than fields on the index description, the last
+		// value must be the docID and we treat it as a string.
+		if i < len(indexDesc.Fields) {
+			descending = indexDesc.Fields[i].Descending
+			kind = fields[i].Kind
+		} else if i > len(indexDesc.Fields) {
+			return IndexDataStoreKey{}, ErrInvalidKey
+		}
+
+		if kind != nil && kind.IsArray() {
+			if arrKind, ok := kind.(client.ScalarArrayKind); ok {
+				kind = arrKind.SubKind()
+			}
+		}
+
+		var val client.NormalValue
+		data, val, err = encoding.DecodeFieldValue(data, descending, kind)
+		if err != nil {
+			return IndexDataStoreKey{}, err
+		}
+
+		key.Fields = append(key.Fields, IndexedField{Value: val, Descending: descending})
+	}
+
+	return key, nil
+}
+
+// EncodeIndexDataStoreKey encodes an IndexDataStoreKey to bytes to be stored as a key
+// for secondary indexes.
+func EncodeIndexDataStoreKey(key *IndexDataStoreKey) []byte { + if key.CollectionID == 0 { + return []byte{} + } + + b := encoding.EncodeUvarintAscending([]byte{'/'}, uint64(key.CollectionID)) + + if key.IndexID == 0 { + return b + } + b = append(b, '/') + b = encoding.EncodeUvarintAscending(b, uint64(key.IndexID)) + + for _, field := range key.Fields { + b = append(b, '/') + b = encoding.EncodeFieldValue(b, field.Value, field.Descending) + } + + return b +} diff --git a/internal/keys/datastore_primary_doc.go b/internal/keys/datastore_primary_doc.go new file mode 100644 index 0000000000..6f531d3c7a --- /dev/null +++ b/internal/keys/datastore_primary_doc.go @@ -0,0 +1,57 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package keys + +import ( + "fmt" + + ds "github.com/ipfs/go-datastore" +) + +const ( + PRIMARY_KEY = "/pk" +) + +type PrimaryDataStoreKey struct { + CollectionRootID uint32 + DocID string +} + +var _ Key = (*PrimaryDataStoreKey)(nil) + +func (k PrimaryDataStoreKey) ToDataStoreKey() DataStoreKey { + return DataStoreKey{ + CollectionRootID: k.CollectionRootID, + DocID: k.DocID, + } +} + +func (k PrimaryDataStoreKey) Bytes() []byte { + return []byte(k.ToString()) +} + +func (k PrimaryDataStoreKey) ToDS() ds.Key { + return ds.NewKey(k.ToString()) +} + +func (k PrimaryDataStoreKey) ToString() string { + result := "" + + if k.CollectionRootID != 0 { + result = result + "/" + fmt.Sprint(k.CollectionRootID) + } + result = result + PRIMARY_KEY + if k.DocID != "" { + result = result + "/" + k.DocID + } + + return result +} diff --git a/internal/keys/datastore_view_item.go b/internal/keys/datastore_view_item.go new file mode 100644 index 0000000000..b1280db327 --- /dev/null +++ b/internal/keys/datastore_view_item.go @@ -0,0 +1,87 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package keys + +import ( + "strconv" + + ds "github.com/ipfs/go-datastore" + + "github.com/sourcenetwork/defradb/internal/encoding" +) + +// ViewCacheKey is a trimmed down [DataStoreKey] used for caching the results +// of View items. +// +// It is stored in the format `/collection/vi/[CollectionRootID]/[ItemID]`. It points to the +// full serialized View item. +type ViewCacheKey struct { + // CollectionRootID is the Root of the Collection that this item belongs to. 
+ CollectionRootID uint32 + + // ItemID is the unique (to this CollectionRootID) ID of the View item. + // + // For now this is essentially just the index of the item in the result-set, however + // that is likely to change in the near future. + ItemID uint +} + +var _ Key = (*ViewCacheKey)(nil) + +func NewViewCacheColPrefix(rootID uint32) ViewCacheKey { + return ViewCacheKey{ + CollectionRootID: rootID, + } +} + +func NewViewCacheKey(rootID uint32, itemID uint) ViewCacheKey { + return ViewCacheKey{ + CollectionRootID: rootID, + ItemID: itemID, + } +} + +func (k ViewCacheKey) ToString() string { + return string(k.Bytes()) +} + +func (k ViewCacheKey) Bytes() []byte { + result := []byte(COLLECTION_VIEW_ITEMS) + + if k.CollectionRootID != 0 { + result = append(result, '/') + result = encoding.EncodeUvarintAscending(result, uint64(k.CollectionRootID)) + } + + if k.ItemID != 0 { + result = append(result, '/') + result = encoding.EncodeUvarintAscending(result, uint64(k.ItemID)) + } + + return result +} + +func (k ViewCacheKey) ToDS() ds.Key { + return ds.NewKey(k.ToString()) +} + +func (k ViewCacheKey) PrettyPrint() string { + result := COLLECTION_VIEW_ITEMS + + if k.CollectionRootID != 0 { + result = result + "/" + strconv.Itoa(int(k.CollectionRootID)) + } + if k.ItemID != 0 { + result = result + "/" + strconv.Itoa(int(k.ItemID)) + } + + return result +} diff --git a/internal/keys/errors.go b/internal/keys/errors.go new file mode 100644 index 0000000000..bd6bf7bb8e --- /dev/null +++ b/internal/keys/errors.go @@ -0,0 +1,44 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package keys + +import ( + "github.com/sourcenetwork/defradb/errors" +) + +const ( + errFailedToGetFieldIdOfKey string = "failed to get FieldID of Key" + errInvalidFieldIndex string = "invalid field index" + errInvalidFieldValue string = "invalid field value" +) + +var ( + ErrFailedToGetFieldIdOfKey = errors.New(errFailedToGetFieldIdOfKey) + ErrEmptyKey = errors.New("received empty key string") + ErrInvalidKey = errors.New("invalid key string") + ErrInvalidFieldIndex = errors.New(errInvalidFieldIndex) + ErrInvalidFieldValue = errors.New(errInvalidFieldValue) +) + +// NewErrFailedToGetFieldIdOfKey returns the error indicating failure to get FieldID of Key. +func NewErrFailedToGetFieldIdOfKey(inner error) error { + return errors.Wrap(errFailedToGetFieldIdOfKey, inner) +} + +// NewErrInvalidFieldIndex returns the error indicating invalid field index. +func NewErrInvalidFieldIndex(i int) error { + return errors.New(errInvalidFieldIndex, errors.NewKV("index", i)) +} + +// NewErrInvalidFieldValue returns the error indicating invalid field value. +func NewErrInvalidFieldValue(reason string) error { + return errors.New(errInvalidFieldValue, errors.NewKV("Reason", reason)) +} diff --git a/internal/keys/headstore_doc.go b/internal/keys/headstore_doc.go new file mode 100644 index 0000000000..5d3ec2306e --- /dev/null +++ b/internal/keys/headstore_doc.go @@ -0,0 +1,94 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package keys + +import ( + "strings" + + "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" +) + +type HeadStoreKey struct { + DocID string + FieldID string //can be 'C' + Cid cid.Cid +} + +var _ Key = (*HeadStoreKey)(nil) + +// Creates a new HeadStoreKey from a string as best as it can, +// splitting the input using '/' as a field deliminator. It assumes +// that the input string is in the following format: +// +// /[DocID]/[FieldId]/[Cid] +// +// Any properties before the above are ignored +func NewHeadStoreKey(key string) (HeadStoreKey, error) { + elements := strings.Split(key, "/") + if len(elements) != 4 { + return HeadStoreKey{}, ErrInvalidKey + } + + cid, err := cid.Decode(elements[3]) + if err != nil { + return HeadStoreKey{}, err + } + + return HeadStoreKey{ + // elements[0] is empty (key has leading '/') + DocID: elements[1], + FieldID: elements[2], + Cid: cid, + }, nil +} + +func (k HeadStoreKey) WithDocID(docID string) HeadStoreKey { + newKey := k + newKey.DocID = docID + return newKey +} + +func (k HeadStoreKey) WithCid(c cid.Cid) HeadStoreKey { + newKey := k + newKey.Cid = c + return newKey +} + +func (k HeadStoreKey) WithFieldID(fieldID string) HeadStoreKey { + newKey := k + newKey.FieldID = fieldID + return newKey +} + +func (k HeadStoreKey) ToString() string { + var result string + + if k.DocID != "" { + result = result + "/" + k.DocID + } + if k.FieldID != "" { + result = result + "/" + k.FieldID + } + if k.Cid.Defined() { + result = result + "/" + k.Cid.String() + } + + return result +} + +func (k HeadStoreKey) Bytes() []byte { + return []byte(k.ToString()) +} + +func (k HeadStoreKey) ToDS() ds.Key { + return ds.NewKey(k.ToString()) +} diff --git a/internal/keys/key.go b/internal/keys/key.go new file mode 100644 index 0000000000..893b9790b4 --- /dev/null +++ b/internal/keys/key.go @@ -0,0 +1,22 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in 
the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package keys + +import ( + ds "github.com/ipfs/go-datastore" +) + +// Key is an interface that represents a key in the database. +type Key interface { + ToString() string + Bytes() []byte + ToDS() ds.Key +} diff --git a/internal/core/key_test.go b/internal/keys/key_test.go similarity index 98% rename from internal/core/key_test.go rename to internal/keys/key_test.go index 4cdb46b72d..37ce364183 100644 --- a/internal/core/key_test.go +++ b/internal/keys/key_test.go @@ -8,7 +8,7 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. -package core +package keys import ( "fmt" @@ -95,7 +95,7 @@ func encodeKey(colID, indexID uint32, fieldParts ...any) []byte { } for i := 0; i < len(fieldParts)/partSize; i++ { b = append(b, '/') - isDescending := fieldParts[i*partSize+1].(bool) + isDescending, _ := fieldParts[i*partSize+1].(bool) if fieldParts[i*partSize] == nil { if isDescending { b = encoding.EncodeNullDescending(b) @@ -103,10 +103,11 @@ func encodeKey(colID, indexID uint32, fieldParts ...any) []byte { b = encoding.EncodeNullAscending(b) } } else { + v, _ := fieldParts[i*partSize].(int) if isDescending { - b = encoding.EncodeUvarintDescending(b, uint64(fieldParts[i*partSize].(int))) + b = encoding.EncodeUvarintDescending(b, uint64(v)) } else { - b = encoding.EncodeUvarintAscending(b, uint64(fieldParts[i*partSize].(int))) + b = encoding.EncodeUvarintAscending(b, uint64(v)) } } } diff --git a/internal/keys/peerstore.go b/internal/keys/peerstore.go new file mode 100644 index 0000000000..7fa628c833 --- /dev/null +++ b/internal/keys/peerstore.go @@ -0,0 +1,17 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included 
in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package keys + +const ( + REPLICATOR = "/rep/id" + REPLICATOR_RETRY_ID = "/rep/retry/id" + REPLICATOR_RETRY_DOC = "/rep/retry/doc" +) diff --git a/internal/keys/peerstore_replicator.go b/internal/keys/peerstore_replicator.go new file mode 100644 index 0000000000..c54113d6a5 --- /dev/null +++ b/internal/keys/peerstore_replicator.go @@ -0,0 +1,41 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package keys + +import ds "github.com/ipfs/go-datastore" + +type ReplicatorKey struct { + ReplicatorID string +} + +var _ Key = (*ReplicatorKey)(nil) + +func NewReplicatorKey(id string) ReplicatorKey { + return ReplicatorKey{ReplicatorID: id} +} + +func (k ReplicatorKey) ToString() string { + result := REPLICATOR + + if k.ReplicatorID != "" { + result = result + "/" + k.ReplicatorID + } + + return result +} + +func (k ReplicatorKey) Bytes() []byte { + return []byte(k.ToString()) +} + +func (k ReplicatorKey) ToDS() ds.Key { + return ds.NewKey(k.ToString()) +} diff --git a/internal/keys/peerstore_replicator_retry.go b/internal/keys/peerstore_replicator_retry.go new file mode 100644 index 0000000000..380676073a --- /dev/null +++ b/internal/keys/peerstore_replicator_retry.go @@ -0,0 +1,54 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package keys + +import ( + "strings" + + ds "github.com/ipfs/go-datastore" + + "github.com/sourcenetwork/defradb/errors" +) + +type ReplicatorRetryIDKey struct { + PeerID string +} + +var _ Key = (*ReplicatorRetryIDKey)(nil) + +func NewReplicatorRetryIDKey(peerID string) ReplicatorRetryIDKey { + return ReplicatorRetryIDKey{ + PeerID: peerID, + } +} + +// NewReplicatorRetryIDKeyFromString creates a new [ReplicatorRetryIDKey] from a string. +// +// It expects the input string to be in the format `/rep/retry/id/[PeerID]`. +func NewReplicatorRetryIDKeyFromString(key string) (ReplicatorRetryIDKey, error) { + peerID := strings.TrimPrefix(key, REPLICATOR_RETRY_ID+"/") + if peerID == "" { + return ReplicatorRetryIDKey{}, errors.WithStack(ErrInvalidKey, errors.NewKV("Key", key)) + } + return NewReplicatorRetryIDKey(peerID), nil +} + +func (k ReplicatorRetryIDKey) ToString() string { + return REPLICATOR_RETRY_ID + "/" + k.PeerID +} + +func (k ReplicatorRetryIDKey) Bytes() []byte { + return []byte(k.ToString()) +} + +func (k ReplicatorRetryIDKey) ToDS() ds.Key { + return ds.NewKey(k.ToString()) +} diff --git a/internal/keys/peerstore_replicator_retry_doc.go b/internal/keys/peerstore_replicator_retry_doc.go new file mode 100644 index 0000000000..c77fc7617a --- /dev/null +++ b/internal/keys/peerstore_replicator_retry_doc.go @@ -0,0 +1,61 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package keys + +import ( + "strings" + + ds "github.com/ipfs/go-datastore" + + "github.com/sourcenetwork/defradb/errors" +) + +type ReplicatorRetryDocIDKey struct { + PeerID string + DocID string +} + +var _ Key = (*ReplicatorRetryDocIDKey)(nil) + +func NewReplicatorRetryDocIDKey(peerID, docID string) ReplicatorRetryDocIDKey { + return ReplicatorRetryDocIDKey{ + PeerID: peerID, + DocID: docID, + } +} + +// NewReplicatorRetryDocIDKeyFromString creates a new [ReplicatorRetryDocIDKey] from a string. +// +// It expects the input string to be in the format `/rep/retry/doc/[PeerID]/[DocID]`. +func NewReplicatorRetryDocIDKeyFromString(key string) (ReplicatorRetryDocIDKey, error) { + trimmedKey := strings.TrimPrefix(key, REPLICATOR_RETRY_DOC+"/") + keyArr := strings.Split(trimmedKey, "/") + if len(keyArr) != 2 { + return ReplicatorRetryDocIDKey{}, errors.WithStack(ErrInvalidKey, errors.NewKV("Key", key)) + } + return NewReplicatorRetryDocIDKey(keyArr[0], keyArr[1]), nil +} + +func (k ReplicatorRetryDocIDKey) ToString() string { + keyString := REPLICATOR_RETRY_DOC + "/" + k.PeerID + if k.DocID != "" { + keyString += "/" + k.DocID + } + return keyString +} + +func (k ReplicatorRetryDocIDKey) Bytes() []byte { + return []byte(k.ToString()) +} + +func (k ReplicatorRetryDocIDKey) ToDS() ds.Key { + return ds.NewKey(k.ToString()) +} diff --git a/internal/keys/systemstore.go b/internal/keys/systemstore.go new file mode 100644 index 0000000000..d3f82a8af5 --- /dev/null +++ b/internal/keys/systemstore.go @@ -0,0 +1,26 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+
+package keys
+
+const (
+	COLLECTION                = "collection"
+	COLLECTION_ROOT           = "/collection/root"
+	COLLECTION_INDEX          = "/collection/index"
+	COLLECTION_NAME           = "/collection/name"
+	COLLECTION_SCHEMA_VERSION = "/collection/version"
+	COLLECTION_ID             = "/collection/id"
+	P2P_COLLECTION            = "/p2p/collection"
+	SCHEMA_VERSION_ROOT       = "/schema/version/r"
+	SCHEMA_VERSION            = "/schema/version/v"
+	COLLECTION_SEQ            = "/seq/collection"
+	INDEX_ID_SEQ              = "/seq/index"
+	FIELD_ID_SEQ              = "/seq/field"
+)
diff --git a/internal/keys/systemstore_collection.go b/internal/keys/systemstore_collection.go
new file mode 100644
index 0000000000..675ab74e76
--- /dev/null
+++ b/internal/keys/systemstore_collection.go
@@ -0,0 +1,44 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package keys
+
+import (
+	"fmt"
+	"strconv"
+
+	ds "github.com/ipfs/go-datastore"
+)
+
+// CollectionKey points to the json serialized description of
+// the collection of the given ID.
+type CollectionKey struct {
+	CollectionID uint32
+}
+
+var _ Key = (*CollectionKey)(nil)
+
+// NewCollectionKey returns a formatted collection key for the system data store.
+// The key is derived from the collection's ID.
+func NewCollectionKey(id uint32) CollectionKey { + return CollectionKey{CollectionID: id} +} + +func (k CollectionKey) ToString() string { + return fmt.Sprintf("%s/%s", COLLECTION_ID, strconv.Itoa(int(k.CollectionID))) +} + +func (k CollectionKey) Bytes() []byte { + return []byte(k.ToString()) +} + +func (k CollectionKey) ToDS() ds.Key { + return ds.NewKey(k.ToString()) +} diff --git a/internal/keys/systemstore_collection_index.go b/internal/keys/systemstore_collection_index.go new file mode 100644 index 0000000000..34eed12482 --- /dev/null +++ b/internal/keys/systemstore_collection_index.go @@ -0,0 +1,86 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package keys + +import ( + "fmt" + "strconv" + "strings" + + ds "github.com/ipfs/go-datastore" + "github.com/sourcenetwork/immutable" +) + +// CollectionIndexKey to a stored description of an index +type CollectionIndexKey struct { + // CollectionID is the id of the collection that the index is on + CollectionID immutable.Option[uint32] + // IndexName is the name of the index + IndexName string +} + +var _ Key = (*CollectionIndexKey)(nil) + +// NewCollectionIndexKey creates a new CollectionIndexKey from a collection name and index name. +func NewCollectionIndexKey(colID immutable.Option[uint32], indexName string) CollectionIndexKey { + return CollectionIndexKey{CollectionID: colID, IndexName: indexName} +} + +// NewCollectionIndexKeyFromString creates a new CollectionIndexKey from a string. +// It expects the input string is in the following format: +// +// /collection/index/[CollectionID]/[IndexName] +// +// Where [IndexName] might be omitted. 
Anything else will return an error. +func NewCollectionIndexKeyFromString(key string) (CollectionIndexKey, error) { + keyArr := strings.Split(key, "/") + if len(keyArr) < 4 || len(keyArr) > 5 || keyArr[1] != COLLECTION || keyArr[2] != "index" { + return CollectionIndexKey{}, ErrInvalidKey + } + + colID, err := strconv.Atoi(keyArr[3]) + if err != nil { + return CollectionIndexKey{}, err + } + + result := CollectionIndexKey{CollectionID: immutable.Some(uint32(colID))} + if len(keyArr) == 5 { + result.IndexName = keyArr[4] + } + return result, nil +} + +// ToString returns the string representation of the key +// It is in the following format: +// /collection/index/[CollectionID]/[IndexName] +// if [CollectionID] is empty, the rest is ignored +func (k CollectionIndexKey) ToString() string { + result := COLLECTION_INDEX + + if k.CollectionID.HasValue() { + result = result + "/" + fmt.Sprint(k.CollectionID.Value()) + if k.IndexName != "" { + result = result + "/" + k.IndexName + } + } + + return result +} + +// Bytes returns the byte representation of the key +func (k CollectionIndexKey) Bytes() []byte { + return []byte(k.ToString()) +} + +// ToDS returns the datastore key +func (k CollectionIndexKey) ToDS() ds.Key { + return ds.NewKey(k.ToString()) +} diff --git a/internal/keys/systemstore_collection_name.go b/internal/keys/systemstore_collection_name.go new file mode 100644 index 0000000000..6d03a42e23 --- /dev/null +++ b/internal/keys/systemstore_collection_name.go @@ -0,0 +1,41 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package keys + +import ( + "fmt" + + ds "github.com/ipfs/go-datastore" +) + +// CollectionNameKey points to the ID of the collection of the given +// name. +type CollectionNameKey struct { + Name string +} + +var _ Key = (*CollectionNameKey)(nil) + +func NewCollectionNameKey(name string) CollectionNameKey { + return CollectionNameKey{Name: name} +} + +func (k CollectionNameKey) ToString() string { + return fmt.Sprintf("%s/%s", COLLECTION_NAME, k.Name) +} + +func (k CollectionNameKey) Bytes() []byte { + return []byte(k.ToString()) +} + +func (k CollectionNameKey) ToDS() ds.Key { + return ds.NewKey(k.ToString()) +} diff --git a/internal/keys/systemstore_collection_root.go b/internal/keys/systemstore_collection_root.go new file mode 100644 index 0000000000..bef50e2ec2 --- /dev/null +++ b/internal/keys/systemstore_collection_root.go @@ -0,0 +1,83 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package keys + +import ( + "fmt" + "strconv" + "strings" + + ds "github.com/ipfs/go-datastore" +) + +// CollectionRootKey points to nil, but the keys/prefix can be used +// to get collections that are of a given RootID. +// +// It is stored in the format `/collection/root/[RootID]/[CollectionID]`. +type CollectionRootKey struct { + RootID uint32 + CollectionID uint32 +} + +var _ Key = (*CollectionRootKey)(nil) + +func NewCollectionRootKey(rootID uint32, collectionID uint32) CollectionRootKey { + return CollectionRootKey{ + RootID: rootID, + CollectionID: collectionID, + } +} + +// NewCollectionRootKeyFromString creates a new [CollectionRootKey]. +// +// It expects the key to be in the format `/collection/root/[RootID]/[CollectionID]`. 
+func NewCollectionRootKeyFromString(key string) (CollectionRootKey, error) { + keyArr := strings.Split(key, "/") + if len(keyArr) != 5 || keyArr[1] != COLLECTION || keyArr[2] != "root" { + return CollectionRootKey{}, ErrInvalidKey + } + rootID, err := strconv.Atoi(keyArr[3]) + if err != nil { + return CollectionRootKey{}, err + } + + collectionID, err := strconv.Atoi(keyArr[4]) + if err != nil { + return CollectionRootKey{}, err + } + + return CollectionRootKey{ + RootID: uint32(rootID), + CollectionID: uint32(collectionID), + }, nil +} + +func (k CollectionRootKey) ToString() string { + result := COLLECTION_ROOT + + if k.RootID != 0 { + result = fmt.Sprintf("%s/%s", result, strconv.Itoa(int(k.RootID))) + } + + if k.CollectionID != 0 { + result = fmt.Sprintf("%s/%s", result, strconv.Itoa(int(k.CollectionID))) + } + + return result +} + +func (k CollectionRootKey) Bytes() []byte { + return []byte(k.ToString()) +} + +func (k CollectionRootKey) ToDS() ds.Key { + return ds.NewKey(k.ToString()) +} diff --git a/internal/keys/systemstore_collection_schema.go b/internal/keys/systemstore_collection_schema.go new file mode 100644 index 0000000000..eb84e95812 --- /dev/null +++ b/internal/keys/systemstore_collection_schema.go @@ -0,0 +1,75 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package keys + +import ( + "fmt" + "strconv" + "strings" + + ds "github.com/ipfs/go-datastore" +) + +// CollectionSchemaVersionKey points to nil, but the keys/prefix can be used +// to get collections that are using, or have used a given schema version. 
+// +// If a collection is updated to a different schema version, the old entry(s) +// of this key will be preserved. +// +// This key should be removed in https://github.com/sourcenetwork/defradb/issues/1085 +type CollectionSchemaVersionKey struct { + SchemaVersionID string + CollectionID uint32 +} + +var _ Key = (*CollectionSchemaVersionKey)(nil) + +func NewCollectionSchemaVersionKey(schemaVersionId string, collectionID uint32) CollectionSchemaVersionKey { + return CollectionSchemaVersionKey{ + SchemaVersionID: schemaVersionId, + CollectionID: collectionID, + } +} + +func NewCollectionSchemaVersionKeyFromString(key string) (CollectionSchemaVersionKey, error) { + elements := strings.Split(key, "/") + colID, err := strconv.Atoi(elements[len(elements)-1]) + if err != nil { + return CollectionSchemaVersionKey{}, err + } + + return CollectionSchemaVersionKey{ + SchemaVersionID: elements[len(elements)-2], + CollectionID: uint32(colID), + }, nil +} + +func (k CollectionSchemaVersionKey) ToString() string { + result := COLLECTION_SCHEMA_VERSION + + if k.SchemaVersionID != "" { + result = result + "/" + k.SchemaVersionID + } + + if k.CollectionID != 0 { + result = fmt.Sprintf("%s/%s", result, strconv.Itoa(int(k.CollectionID))) + } + + return result +} + +func (k CollectionSchemaVersionKey) Bytes() []byte { + return []byte(k.ToString()) +} + +func (k CollectionSchemaVersionKey) ToDS() ds.Key { + return ds.NewKey(k.ToString()) +} diff --git a/internal/keys/systemstore_p2p_collection.go b/internal/keys/systemstore_p2p_collection.go new file mode 100644 index 0000000000..129c3c27a5 --- /dev/null +++ b/internal/keys/systemstore_p2p_collection.go @@ -0,0 +1,56 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package keys + +import ( + "strings" + + ds "github.com/ipfs/go-datastore" + + "github.com/sourcenetwork/defradb/errors" +) + +type P2PCollectionKey struct { + CollectionID string +} + +var _ Key = (*P2PCollectionKey)(nil) + +// NewP2PCollectionKey returns a new [P2PCollectionKey] for the given collection ID. +func NewP2PCollectionKey(collectionID string) P2PCollectionKey { + return P2PCollectionKey{CollectionID: collectionID} +} + +func NewP2PCollectionKeyFromString(key string) (P2PCollectionKey, error) { + keyArr := strings.Split(key, "/") + if len(keyArr) != 4 { + return P2PCollectionKey{}, errors.WithStack(ErrInvalidKey, errors.NewKV("Key", key)) + } + return NewP2PCollectionKey(keyArr[3]), nil +} + +func (k P2PCollectionKey) ToString() string { + result := P2P_COLLECTION + + if k.CollectionID != "" { + result = result + "/" + k.CollectionID + } + + return result +} + +func (k P2PCollectionKey) Bytes() []byte { + return []byte(k.ToString()) +} + +func (k P2PCollectionKey) ToDS() ds.Key { + return ds.NewKey(k.ToString()) +} diff --git a/internal/keys/systemstore_schema_root.go b/internal/keys/systemstore_schema_root.go new file mode 100644 index 0000000000..848e05c83c --- /dev/null +++ b/internal/keys/systemstore_schema_root.go @@ -0,0 +1,69 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package keys + +import ( + "strings" + + ds "github.com/ipfs/go-datastore" +) + +// SchemaRootKey indexes schema version ids by their root schema id. 
+// +// The index is the key, there are no values stored against the key. +type SchemaRootKey struct { + SchemaRoot string + SchemaVersionID string +} + +var _ Key = (*SchemaRootKey)(nil) + +func NewSchemaRootKey(schemaRoot string, schemaVersionID string) SchemaRootKey { + return SchemaRootKey{ + SchemaRoot: schemaRoot, + SchemaVersionID: schemaVersionID, + } +} + +func NewSchemaRootKeyFromString(keyString string) (SchemaRootKey, error) { + keyString = strings.TrimPrefix(keyString, SCHEMA_VERSION_ROOT+"/") + elements := strings.Split(keyString, "/") + if len(elements) != 2 { + return SchemaRootKey{}, ErrInvalidKey + } + + return SchemaRootKey{ + SchemaRoot: elements[0], + SchemaVersionID: elements[1], + }, nil +} + +func (k SchemaRootKey) ToString() string { + result := SCHEMA_VERSION_ROOT + + if k.SchemaRoot != "" { + result = result + "/" + k.SchemaRoot + } + + if k.SchemaVersionID != "" { + result = result + "/" + k.SchemaVersionID + } + + return result +} + +func (k SchemaRootKey) Bytes() []byte { + return []byte(k.ToString()) +} + +func (k SchemaRootKey) ToDS() ds.Key { + return ds.NewKey(k.ToString()) +} diff --git a/internal/keys/systemstore_schema_version.go b/internal/keys/systemstore_schema_version.go new file mode 100644 index 0000000000..d435aa7e4a --- /dev/null +++ b/internal/keys/systemstore_schema_version.go @@ -0,0 +1,44 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package keys + +import ds "github.com/ipfs/go-datastore" + +// SchemaVersionKey points to the json serialized schema at the specified version. +// +// Its corresponding value is immutable. 
+type SchemaVersionKey struct { + SchemaVersionID string +} + +var _ Key = (*SchemaVersionKey)(nil) + +func NewSchemaVersionKey(schemaVersionID string) SchemaVersionKey { + return SchemaVersionKey{SchemaVersionID: schemaVersionID} +} + +func (k SchemaVersionKey) ToString() string { + result := SCHEMA_VERSION + + if k.SchemaVersionID != "" { + result = result + "/" + k.SchemaVersionID + } + + return result +} + +func (k SchemaVersionKey) Bytes() []byte { + return []byte(k.ToString()) +} + +func (k SchemaVersionKey) ToDS() ds.Key { + return ds.NewKey(k.ToString()) +} diff --git a/internal/keys/systemstore_seq_collection_id.go b/internal/keys/systemstore_seq_collection_id.go new file mode 100644 index 0000000000..a589e64cce --- /dev/null +++ b/internal/keys/systemstore_seq_collection_id.go @@ -0,0 +1,30 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package keys + +import ds "github.com/ipfs/go-datastore" + +// CollectionIDSequenceKey is used to key the sequence used to generate collection ids. 
+type CollectionIDSequenceKey struct{} + +var _ Key = (*CollectionIDSequenceKey)(nil) + +func (k CollectionIDSequenceKey) ToString() string { + return COLLECTION_SEQ +} + +func (k CollectionIDSequenceKey) Bytes() []byte { + return []byte(k.ToString()) +} + +func (k CollectionIDSequenceKey) ToDS() ds.Key { + return ds.NewKey(k.ToString()) +} diff --git a/internal/keys/systemstore_seq_field_id.go b/internal/keys/systemstore_seq_field_id.go new file mode 100644 index 0000000000..7cd01e79d3 --- /dev/null +++ b/internal/keys/systemstore_seq_field_id.go @@ -0,0 +1,43 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package keys + +import ( + "strconv" + + ds "github.com/ipfs/go-datastore" +) + +// FieldIDSequenceKey is used to key the sequence used to generate field ids. +// +// The sequence is specific to each collection root. Multiple collection of the same root +// must maintain consistent field ids. 
+type FieldIDSequenceKey struct { + CollectionRoot uint32 +} + +var _ Key = (*FieldIDSequenceKey)(nil) + +func NewFieldIDSequenceKey(collectionRoot uint32) FieldIDSequenceKey { + return FieldIDSequenceKey{CollectionRoot: collectionRoot} +} + +func (k FieldIDSequenceKey) ToString() string { + return FIELD_ID_SEQ + "/" + strconv.Itoa(int(k.CollectionRoot)) +} + +func (k FieldIDSequenceKey) Bytes() []byte { + return []byte(k.ToString()) +} + +func (k FieldIDSequenceKey) ToDS() ds.Key { + return ds.NewKey(k.ToString()) +} diff --git a/internal/keys/systemstore_seq_index_id.go b/internal/keys/systemstore_seq_index_id.go new file mode 100644 index 0000000000..5fbf09d145 --- /dev/null +++ b/internal/keys/systemstore_seq_index_id.go @@ -0,0 +1,42 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package keys + +import ( + "strconv" + + ds "github.com/ipfs/go-datastore" +) + +// IndexIDSequenceKey is used to key the sequence used to generate index ids. +// +// The sequence is specific to each collection version. 
+type IndexIDSequenceKey struct { + CollectionID uint32 +} + +var _ Key = (*IndexIDSequenceKey)(nil) + +func NewIndexIDSequenceKey(collectionID uint32) IndexIDSequenceKey { + return IndexIDSequenceKey{CollectionID: collectionID} +} + +func (k IndexIDSequenceKey) ToString() string { + return INDEX_ID_SEQ + "/" + strconv.Itoa(int(k.CollectionID)) +} + +func (k IndexIDSequenceKey) Bytes() []byte { + return []byte(k.ToString()) +} + +func (k IndexIDSequenceKey) ToDS() ds.Key { + return ds.NewKey(k.ToString()) +} diff --git a/internal/lens/fetcher.go b/internal/lens/fetcher.go index bbe0c45a0d..db9e418afa 100644 --- a/internal/lens/fetcher.go +++ b/internal/lens/fetcher.go @@ -25,6 +25,7 @@ import ( "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/internal/core" "github.com/sourcenetwork/defradb/internal/db/fetcher" + "github.com/sourcenetwork/defradb/internal/keys" "github.com/sourcenetwork/defradb/internal/planner/mapper" ) @@ -294,10 +295,10 @@ func (f *lensedFetcher) updateDataStore(ctx context.Context, original map[string return core.ErrInvalidKey } - datastoreKeyBase := core.DataStoreKey{ + datastoreKeyBase := keys.DataStoreKey{ CollectionRootID: f.col.Description().RootID, DocID: docID, - InstanceType: core.ValueKey, + InstanceType: keys.ValueKey, } for fieldName, value := range modifiedFieldValuesByName { @@ -320,7 +321,7 @@ func (f *lensedFetcher) updateDataStore(ctx context.Context, original map[string } } - versionKey := datastoreKeyBase.WithFieldID(core.DATASTORE_DOC_VERSION_FIELD_ID) + versionKey := datastoreKeyBase.WithFieldID(keys.DATASTORE_DOC_VERSION_FIELD_ID) err := f.txn.Datastore().Put(ctx, versionKey.ToDS(), []byte(f.targetVersionID)) if err != nil { return err diff --git a/internal/merkle/clock/clock.go b/internal/merkle/clock/clock.go index 9f0f6e77fb..94180f2144 100644 --- a/internal/merkle/clock/clock.go +++ b/internal/merkle/clock/clock.go @@ -27,6 +27,7 @@ import ( "github.com/sourcenetwork/defradb/internal/core" 
coreblock "github.com/sourcenetwork/defradb/internal/core/block" "github.com/sourcenetwork/defradb/internal/encryption" + "github.com/sourcenetwork/defradb/internal/keys" ) var ( @@ -47,7 +48,7 @@ func NewMerkleClock( headstore datastore.DSReaderWriter, blockstore datastore.Blockstore, encstore datastore.Blockstore, - namespace core.HeadStoreKey, + namespace keys.HeadStoreKey, crdt core.ReplicatedData, ) *MerkleClock { return &MerkleClock{ diff --git a/internal/merkle/clock/clock_test.go b/internal/merkle/clock/clock_test.go index f3f2f6e155..c0f169c0a5 100644 --- a/internal/merkle/clock/clock_test.go +++ b/internal/merkle/clock/clock_test.go @@ -19,10 +19,10 @@ import ( "github.com/sourcenetwork/defradb/client/request" "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/internal/core" coreblock "github.com/sourcenetwork/defradb/internal/core/block" ccid "github.com/sourcenetwork/defradb/internal/core/cid" "github.com/sourcenetwork/defradb/internal/core/crdt" + "github.com/sourcenetwork/defradb/internal/keys" ) func newDS() ds.Datastore { @@ -33,12 +33,12 @@ func newTestMerkleClock() *MerkleClock { s := newDS() multistore := datastore.MultiStoreFrom(s) - reg := crdt.NewLWWRegister(multistore.Rootstore(), core.CollectionSchemaVersionKey{}, core.DataStoreKey{}, "") + reg := crdt.NewLWWRegister(multistore.Rootstore(), keys.CollectionSchemaVersionKey{}, keys.DataStoreKey{}, "") return NewMerkleClock( multistore.Headstore(), multistore.Blockstore(), multistore.Encstore(), - core.HeadStoreKey{DocID: request.DocIDArgName, FieldID: "1"}, + keys.HeadStoreKey{DocID: request.DocIDArgName, FieldID: "1"}, reg, ) } @@ -46,8 +46,8 @@ func newTestMerkleClock() *MerkleClock { func TestNewMerkleClock(t *testing.T) { s := newDS() multistore := datastore.MultiStoreFrom(s) - reg := crdt.NewLWWRegister(multistore.Rootstore(), core.CollectionSchemaVersionKey{}, core.DataStoreKey{}, "") - clk := NewMerkleClock(multistore.Headstore(), multistore.Blockstore(), 
multistore.Encstore(), core.HeadStoreKey{}, reg) + reg := crdt.NewLWWRegister(multistore.Rootstore(), keys.CollectionSchemaVersionKey{}, keys.DataStoreKey{}, "") + clk := NewMerkleClock(multistore.Headstore(), multistore.Blockstore(), multistore.Encstore(), keys.HeadStoreKey{}, reg) if clk.headstore != multistore.Headstore() { t.Error("MerkleClock store not correctly set") diff --git a/internal/merkle/clock/heads.go b/internal/merkle/clock/heads.go index 9b1fad43dd..0dcf2a8f99 100644 --- a/internal/merkle/clock/heads.go +++ b/internal/merkle/clock/heads.go @@ -21,23 +21,23 @@ import ( "github.com/sourcenetwork/corelog" "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/keys" ) // heads manages the current Merkle-CRDT heads. type heads struct { store datastore.DSReaderWriter - namespace core.HeadStoreKey + namespace keys.HeadStoreKey } -func NewHeadSet(store datastore.DSReaderWriter, namespace core.HeadStoreKey) *heads { +func NewHeadSet(store datastore.DSReaderWriter, namespace keys.HeadStoreKey) *heads { return &heads{ store: store, namespace: namespace, } } -func (hh *heads) key(c cid.Cid) core.HeadStoreKey { +func (hh *heads) key(c cid.Cid) keys.HeadStoreKey { return hh.namespace.WithCid(c) } @@ -102,7 +102,7 @@ func (hh *heads) List(ctx context.Context) ([]cid.Cid, uint64, error) { return nil, 0, NewErrFailedToGetNextQResult(r.Error) } - headKey, err := core.NewHeadStoreKey(r.Key) + headKey, err := keys.NewHeadStoreKey(r.Key) if err != nil { return nil, 0, err } diff --git a/internal/merkle/clock/heads_test.go b/internal/merkle/clock/heads_test.go index 0eb7acdd0e..cb8e1d1014 100644 --- a/internal/merkle/clock/heads_test.go +++ b/internal/merkle/clock/heads_test.go @@ -22,8 +22,8 @@ import ( "github.com/ipfs/go-cid" "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/internal/core" ccid "github.com/sourcenetwork/defradb/internal/core/cid" + 
"github.com/sourcenetwork/defradb/internal/keys" ) func newRandomCID() cid.Cid { @@ -45,7 +45,7 @@ func newHeadSet() *heads { return NewHeadSet( datastore.AsDSReaderWriter(s), - core.HeadStoreKey{}.WithDocID("myDocID").WithFieldID("1"), + keys.HeadStoreKey{}.WithDocID("myDocID").WithFieldID("1"), ) } diff --git a/internal/merkle/crdt/composite.go b/internal/merkle/crdt/composite.go index fe9c13a0f5..44df12f83f 100644 --- a/internal/merkle/crdt/composite.go +++ b/internal/merkle/crdt/composite.go @@ -16,9 +16,9 @@ import ( cidlink "github.com/ipld/go-ipld-prime/linking/cid" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/internal/core" coreblock "github.com/sourcenetwork/defradb/internal/core/block" corecrdt "github.com/sourcenetwork/defradb/internal/core/crdt" + "github.com/sourcenetwork/defradb/internal/keys" "github.com/sourcenetwork/defradb/internal/merkle/clock" ) @@ -35,8 +35,8 @@ var _ MerkleCRDT = (*MerkleCompositeDAG)(nil) // backed by a CompositeDAG CRDT. func NewMerkleCompositeDAG( store Stores, - schemaVersionKey core.CollectionSchemaVersionKey, - key core.DataStoreKey, + schemaVersionKey keys.CollectionSchemaVersionKey, + key keys.DataStoreKey, ) *MerkleCompositeDAG { compositeDag := corecrdt.NewCompositeDAG( store.Datastore(), diff --git a/internal/merkle/crdt/counter.go b/internal/merkle/crdt/counter.go index c43a795294..50434ed7da 100644 --- a/internal/merkle/crdt/counter.go +++ b/internal/merkle/crdt/counter.go @@ -16,8 +16,8 @@ import ( cidlink "github.com/ipld/go-ipld-prime/linking/cid" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/internal/core" "github.com/sourcenetwork/defradb/internal/core/crdt" + "github.com/sourcenetwork/defradb/internal/keys" "github.com/sourcenetwork/defradb/internal/merkle/clock" ) @@ -33,8 +33,8 @@ var _ MerkleCRDT = (*MerkleCounter)(nil) // backed by a Counter CRDT. 
func NewMerkleCounter( store Stores, - schemaVersionKey core.CollectionSchemaVersionKey, - key core.DataStoreKey, + schemaVersionKey keys.CollectionSchemaVersionKey, + key keys.DataStoreKey, fieldName string, allowDecrement bool, kind client.ScalarKind, diff --git a/internal/merkle/crdt/lwwreg.go b/internal/merkle/crdt/lwwreg.go index d24c2a107e..18fc7ee35d 100644 --- a/internal/merkle/crdt/lwwreg.go +++ b/internal/merkle/crdt/lwwreg.go @@ -16,8 +16,8 @@ import ( cidlink "github.com/ipld/go-ipld-prime/linking/cid" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/internal/core" corecrdt "github.com/sourcenetwork/defradb/internal/core/crdt" + "github.com/sourcenetwork/defradb/internal/keys" "github.com/sourcenetwork/defradb/internal/merkle/clock" ) @@ -33,8 +33,8 @@ var _ MerkleCRDT = (*MerkleLWWRegister)(nil) // backed by a LWWRegister CRDT. func NewMerkleLWWRegister( store Stores, - schemaVersionKey core.CollectionSchemaVersionKey, - key core.DataStoreKey, + schemaVersionKey keys.CollectionSchemaVersionKey, + key keys.DataStoreKey, fieldName string, ) *MerkleLWWRegister { register := corecrdt.NewLWWRegister(store.Datastore(), schemaVersionKey, key, fieldName) diff --git a/internal/merkle/crdt/merklecrdt.go b/internal/merkle/crdt/merklecrdt.go index a5cc00a9e1..3dd47ad0dc 100644 --- a/internal/merkle/crdt/merklecrdt.go +++ b/internal/merkle/crdt/merklecrdt.go @@ -20,7 +20,7 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/keys" "github.com/sourcenetwork/defradb/internal/merkle/clock" ) @@ -43,10 +43,10 @@ type MerkleCRDT interface { func InstanceWithStore( store Stores, - schemaVersionKey core.CollectionSchemaVersionKey, + schemaVersionKey keys.CollectionSchemaVersionKey, cType client.CType, kind client.FieldKind, - key core.DataStoreKey, + key keys.DataStoreKey, fieldName string, ) 
(MerkleCRDT, error) { switch cType { diff --git a/internal/planner/commit.go b/internal/planner/commit.go index d9551dc7ab..dc9a0ce3d7 100644 --- a/internal/planner/commit.go +++ b/internal/planner/commit.go @@ -20,6 +20,7 @@ import ( "github.com/sourcenetwork/defradb/internal/core" coreblock "github.com/sourcenetwork/defradb/internal/core/block" "github.com/sourcenetwork/defradb/internal/db/fetcher" + "github.com/sourcenetwork/defradb/internal/keys" "github.com/sourcenetwork/defradb/internal/planner/mapper" ) @@ -68,7 +69,7 @@ func (n *dagScanNode) Kind() string { func (n *dagScanNode) Init() error { if len(n.spans.Value) == 0 { if n.commitSelect.DocID.HasValue() { - dsKey := core.DataStoreKey{}.WithDocID(n.commitSelect.DocID.Value()) + dsKey := keys.DataStoreKey{}.WithDocID(n.commitSelect.DocID.Value()) if n.commitSelect.FieldID.HasValue() { field := n.commitSelect.FieldID.Value() @@ -112,7 +113,7 @@ func (n *dagScanNode) Spans(spans core.Spans) { for i, span := range headSetSpans.Value { if span.Start().FieldID != fieldID { - headSetSpans.Value[i] = core.NewSpan(span.Start().WithFieldID(fieldID), core.DataStoreKey{}) + headSetSpans.Value[i] = core.NewSpan(span.Start().WithFieldID(fieldID), keys.DataStoreKey{}) } } diff --git a/internal/planner/multi.go b/internal/planner/multi.go index 4b82826118..c4c3278480 100644 --- a/internal/planner/multi.go +++ b/internal/planner/multi.go @@ -13,6 +13,7 @@ package planner import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/keys" ) /* @@ -156,7 +157,7 @@ func (p *parallelNode) nextAppend(index int, plan planNode) (bool, error) { } // pass the doc key as a reference through the spans interface - spans := core.NewSpans(core.NewSpan(core.DataStoreKey{DocID: key}, core.DataStoreKey{})) + spans := core.NewSpans(core.NewSpan(keys.DataStoreKey{DocID: key}, keys.DataStoreKey{})) plan.Spans(spans) err := plan.Init() if err != nil { diff 
--git a/internal/planner/select.go b/internal/planner/select.go index 064f9b2fec..9393103e40 100644 --- a/internal/planner/select.go +++ b/internal/planner/select.go @@ -19,6 +19,7 @@ import ( "github.com/sourcenetwork/defradb/internal/core" "github.com/sourcenetwork/defradb/internal/db/base" "github.com/sourcenetwork/defradb/internal/db/fetcher" + "github.com/sourcenetwork/defradb/internal/keys" "github.com/sourcenetwork/defradb/internal/planner/mapper" ) @@ -264,7 +265,7 @@ func (n *selectNode) initSource() ([]aggregateNode, error) { return nil, err } spans := fetcher.NewVersionedSpan( - core.DataStoreKey{DocID: n.selectReq.DocIDs.Value()[0]}, + keys.DataStoreKey{DocID: n.selectReq.DocIDs.Value()[0]}, c, ) // @todo check len origScan.Spans(spans) diff --git a/internal/planner/view.go b/internal/planner/view.go index 0226a2c9c6..e5beef128b 100644 --- a/internal/planner/view.go +++ b/internal/planner/view.go @@ -15,6 +15,7 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/keys" "github.com/sourcenetwork/defradb/internal/planner/mapper" ) @@ -199,7 +200,7 @@ func (n *cachedViewFetcher) Init() error { n.queryResults = nil } - prefix := core.NewViewCacheColPrefix(n.def.Description.RootID) + prefix := keys.NewViewCacheColPrefix(n.def.Description.RootID) var err error n.queryResults, err = n.p.txn.Datastore().Query(n.p.ctx, query.Query{ diff --git a/net/server_test.go b/net/server_test.go index 4dc6428205..a2cda4c76b 100644 --- a/net/server_test.go +++ b/net/server_test.go @@ -22,6 +22,7 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/internal/core" + "github.com/sourcenetwork/defradb/internal/keys" ) func TestNewServerSimple(t *testing.T) { @@ -74,7 +75,7 @@ func TestGetHeadLog(t *testing.T) { } func getHead(ctx context.Context, db client.DB, docID client.DocID) (cid.Cid, error) { - prefix := 
core.DataStoreKeyFromDocID(docID).ToHeadStoreKey().WithFieldID(core.COMPOSITE_NAMESPACE).ToString() + prefix := keys.DataStoreKeyFromDocID(docID).ToHeadStoreKey().WithFieldID(core.COMPOSITE_NAMESPACE).ToString() results, err := db.Headstore().Query(ctx, query.Query{Prefix: prefix}) if err != nil { return cid.Undef, err @@ -85,7 +86,7 @@ func getHead(ctx context.Context, db client.DB, docID client.DocID) (cid.Cid, er } if len(entries) > 0 { - hsKey, err := core.NewHeadStoreKey(entries[0].Key) + hsKey, err := keys.NewHeadStoreKey(entries[0].Key) if err != nil { return cid.Undef, err }