From 2909d8ef7f27c453023076cfad1f442aa188d1c6 Mon Sep 17 00:00:00 2001 From: Andrew Sisley Date: Fri, 2 Feb 2024 15:09:08 -0500 Subject: [PATCH] Model Collection SchemaVersions and migrations on Collections --- cli/cli.go | 4 +- cli/collection_describe.go | 4 +- cli/schema_migration_down.go | 19 +- cli/schema_migration_get.go | 38 -- cli/schema_migration_set.go | 4 +- cli/schema_migration_set_registry.go | 54 ++ cli/schema_migration_up.go | 19 +- cli/schema_patch.go | 43 +- ...ma_set_default.go => schema_set_active.go} | 13 +- client/db.go | 30 +- client/descriptions.go | 55 ++ client/lens.go | 32 +- client/mocks/collection.go | 14 +- client/mocks/db.go | 76 +-- core/data_test.go | 340 +++++------ core/key.go | 113 ++-- core/key_test.go | 29 +- db/backup.go | 2 +- db/base/collection_keys.go | 6 +- db/collection.go | 278 +++++++-- db/collection_delete.go | 5 +- db/description/collection.go | 44 ++ db/description/schema.go | 18 +- db/indexed_docs_test.go | 4 +- db/lens.go | 160 +++++ db/schema.go | 3 + db/txn_db.go | 29 +- .../i2198-collection-remodel.md | 5 + http/client.go | 27 +- http/client_lens.go | 75 ++- http/handler_lens.go | 113 ++-- http/handler_store.go | 62 +- http/openapi.go | 2 + lens/fetcher.go | 30 +- lens/history.go | 152 ++--- lens/lens.go | 11 +- lens/registry.go | 221 +++---- lens/txn_registry.go | 55 +- net/peer_replicator.go | 4 +- net/server.go | 10 +- net/server_test.go | 4 +- tests/clients/cli/wrapper.go | 34 +- tests/clients/cli/wrapper_lens.go | 50 +- tests/clients/http/wrapper.go | 13 +- tests/gen/cli/gendocs.go | 2 +- tests/integration/lens.go | 57 -- .../schema/migrations/query/simple_test.go | 103 +++- .../schema/migrations/query/with_p2p_test.go | 8 +- .../migrations/query/with_restart_test.go | 4 +- .../migrations/query/with_set_default_test.go | 30 +- .../schema/migrations/simple_test.go | 162 ++++-- .../schema/migrations/with_txn_test.go | 38 +- .../schema/updates/with_schema_branch_test.go | 547 ++++++++++++++++++ 
.../schema/with_update_set_default_test.go | 8 +- tests/integration/test_case.go | 10 +- tests/integration/utils2.go | 22 +- 56 files changed, 2139 insertions(+), 1156 deletions(-) delete mode 100644 cli/schema_migration_get.go create mode 100644 cli/schema_migration_set_registry.go rename cli/{schema_set_default.go => schema_set_active.go} (60%) create mode 100644 db/lens.go create mode 100644 docs/data_format_changes/i2198-collection-remodel.md create mode 100644 tests/integration/schema/updates/with_schema_branch_test.go diff --git a/cli/cli.go b/cli/cli.go index 2ee882afce..b7d5e05ec7 100644 --- a/cli/cli.go +++ b/cli/cli.go @@ -48,7 +48,7 @@ func NewDefraCommand(cfg *config.Config) *cobra.Command { schema_migrate := MakeSchemaMigrationCommand() schema_migrate.AddCommand( MakeSchemaMigrationSetCommand(), - MakeSchemaMigrationGetCommand(), + MakeSchemaMigrationSetRegistryCommand(), MakeSchemaMigrationReloadCommand(), MakeSchemaMigrationUpCommand(), MakeSchemaMigrationDownCommand(), @@ -58,7 +58,7 @@ func NewDefraCommand(cfg *config.Config) *cobra.Command { schema.AddCommand( MakeSchemaAddCommand(), MakeSchemaPatchCommand(), - MakeSchemaSetDefaultCommand(), + MakeSchemaSetActiveCommand(), MakeSchemaDescribeCommand(), schema_migrate, ) diff --git a/cli/collection_describe.go b/cli/collection_describe.go index a21c4d0c10..0e2e36e718 100644 --- a/cli/collection_describe.go +++ b/cli/collection_describe.go @@ -17,6 +17,7 @@ import ( ) func MakeCollectionDescribeCommand() *cobra.Command { + var getInactive bool var cmd = &cobra.Command{ Use: "describe", Short: "View collection description.", @@ -42,7 +43,7 @@ Example: view collection by version id return writeJSON(cmd, col.Definition()) } // if no collection specified list all collections - cols, err := store.GetAllCollections(cmd.Context()) + cols, err := store.GetAllCollections(cmd.Context(), getInactive) if err != nil { return err } @@ -53,5 +54,6 @@ Example: view collection by version id return writeJSON(cmd, 
colDesc) }, } + cmd.Flags().BoolVar(&getInactive, "get-inactive", false, "Get inactive collections as well as active") return cmd } diff --git a/cli/schema_migration_down.go b/cli/schema_migration_down.go index 1dcb5e64da..52ca2a2ac6 100644 --- a/cli/schema_migration_down.go +++ b/cli/schema_migration_down.go @@ -23,21 +23,21 @@ import ( func MakeSchemaMigrationDownCommand() *cobra.Command { var file string - var schemaVersionID string + var collectionID uint32 var cmd = &cobra.Command{ - Use: "down --version ", - Short: "Reverses the migration from the specified schema version.", - Long: `Reverses the migration from the specified schema version. + Use: "down --collection ", + Short: "Reverses the migration to the specified collection version.", + Long: `Reverses the migration to the specified collection version. Documents is a list of documents to reverse the migration from. Example: migrate from string - defradb client schema migration down --version bae123 '[{"name": "Bob"}]' + defradb client schema migration down --collection 2 '[{"name": "Bob"}]' Example: migrate from file - defradb client schema migration down --version bae123 -f documents.json + defradb client schema migration down --collection 2 -f documents.json Example: migrate from stdin - cat documents.json | defradb client schema migration down --version bae123 - + cat documents.json | defradb client schema migration down --collection 2 - `, Args: cobra.RangeArgs(0, 1), RunE: func(cmd *cobra.Command, args []string) error { @@ -71,7 +71,8 @@ Example: migrate from stdin if tx, ok := cmd.Context().Value(txContextKey).(datastore.Txn); ok { lens = lens.WithTxn(tx) } - out, err := lens.MigrateDown(cmd.Context(), enumerable.New(src), schemaVersionID) + + out, err := lens.MigrateDown(cmd.Context(), enumerable.New(src), collectionID) if err != nil { return err } @@ -86,6 +87,6 @@ Example: migrate from stdin }, } cmd.Flags().StringVarP(&file, "file", "f", "", "File containing document(s)") - 
cmd.Flags().StringVar(&schemaVersionID, "version", "", "Schema version id") + cmd.Flags().Uint32Var(&collectionID, "collection", 0, "Collection id") return cmd } diff --git a/cli/schema_migration_get.go b/cli/schema_migration_get.go deleted file mode 100644 index 43b66599b7..0000000000 --- a/cli/schema_migration_get.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package cli - -import ( - "github.com/spf13/cobra" -) - -func MakeSchemaMigrationGetCommand() *cobra.Command { - var cmd = &cobra.Command{ - Use: "get", - Short: "Gets the schema migrations within DefraDB", - Long: `Gets the schema migrations within the local DefraDB node. - -Example: - defradb client schema migration get' - -Learn more about the DefraDB GraphQL Schema Language on https://docs.source.network.`, - RunE: func(cmd *cobra.Command, args []string) error { - store := mustGetStoreContext(cmd) - - cfgs, err := store.LensRegistry().Config(cmd.Context()) - if err != nil { - return err - } - return writeJSON(cmd, cfgs) - }, - } - return cmd -} diff --git a/cli/schema_migration_set.go b/cli/schema_migration_set.go index 280130b8db..91d1d741bc 100644 --- a/cli/schema_migration_set.go +++ b/cli/schema_migration_set.go @@ -27,7 +27,7 @@ func MakeSchemaMigrationSetCommand() *cobra.Command { var cmd = &cobra.Command{ Use: "set [src] [dst] [cfg]", Short: "Set a schema migration within DefraDB", - Long: `Set a migration between two schema versions within the local DefraDB node. + Long: `Set a migration between collections of the given schema versions within the local DefraDB node. 
Example: set from an argument string: defradb client schema migration set bae123 bae456 '{"lenses": [...' @@ -80,7 +80,7 @@ Learn more about the DefraDB GraphQL Schema Language on https://docs.source.netw Lens: lensCfg, } - return store.LensRegistry().SetMigration(cmd.Context(), migrationCfg) + return store.SetMigration(cmd.Context(), migrationCfg) }, } cmd.Flags().StringVarP(&lensFile, "file", "f", "", "Lens configuration file") diff --git a/cli/schema_migration_set_registry.go b/cli/schema_migration_set_registry.go new file mode 100644 index 0000000000..4303c99c9e --- /dev/null +++ b/cli/schema_migration_set_registry.go @@ -0,0 +1,54 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "encoding/json" + "strconv" + "strings" + + "github.com/lens-vm/lens/host-go/config/model" + "github.com/spf13/cobra" +) + +func MakeSchemaMigrationSetRegistryCommand() *cobra.Command { + var cmd = &cobra.Command{ + Use: "set-registry [collectionID] [cfg]", + Short: "Set a schema migration within the DefraDB LensRegistry", + Long: `Set a migration to a collection within the LensRegistry of the local DefraDB node. +Does not persist the migration after restart. + +Example: set from an argument string: + defradb client schema migration set-registry 2 '{"lenses": [...' 
+ +Learn more about the DefraDB GraphQL Schema Language on https://docs.source.network.`, + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + store := mustGetStoreContext(cmd) + + decoder := json.NewDecoder(strings.NewReader(args[1])) + decoder.DisallowUnknownFields() + + var lensCfg model.Lens + if err := decoder.Decode(&lensCfg); err != nil { + return NewErrInvalidLensConfig(err) + } + + collectionID, err := strconv.ParseUint(args[0], 10, 64) + if err != nil { + return err + } + + return store.LensRegistry().SetMigration(cmd.Context(), uint32(collectionID), lensCfg) + }, + } + return cmd +} diff --git a/cli/schema_migration_up.go b/cli/schema_migration_up.go index 3b0b522349..e4f4399181 100644 --- a/cli/schema_migration_up.go +++ b/cli/schema_migration_up.go @@ -23,21 +23,21 @@ import ( func MakeSchemaMigrationUpCommand() *cobra.Command { var file string - var schemaVersionID string + var collectionID uint32 var cmd = &cobra.Command{ - Use: "up --version ", - Short: "Applies the migration to the specified schema version.", - Long: `Applies the migration to the specified schema version. + Use: "up --collection ", + Short: "Applies the migration to the specified collection version.", + Long: `Applies the migration to the specified collection version. Documents is a list of documents to apply the migration to. 
Example: migrate from string - defradb client schema migration up --version bae123 '[{"name": "Bob"}]' + defradb client schema migration up --collection 2 '[{"name": "Bob"}]' Example: migrate from file - defradb client schema migration up --version bae123 -f documents.json + defradb client schema migration up --collection 2 -f documents.json Example: migrate from stdin - cat documents.json | defradb client schema migration up --version bae123 - + cat documents.json | defradb client schema migration up --collection 2 - `, Args: cobra.RangeArgs(0, 1), RunE: func(cmd *cobra.Command, args []string) error { @@ -71,7 +71,8 @@ Example: migrate from stdin if tx, ok := cmd.Context().Value(txContextKey).(datastore.Txn); ok { lens = lens.WithTxn(tx) } - out, err := lens.MigrateUp(cmd.Context(), enumerable.New(src), schemaVersionID) + + out, err := lens.MigrateUp(cmd.Context(), enumerable.New(src), collectionID) if err != nil { return err } @@ -86,6 +87,6 @@ Example: migrate from stdin }, } cmd.Flags().StringVarP(&file, "file", "f", "", "File containing document(s)") - cmd.Flags().StringVar(&schemaVersionID, "version", "", "Schema version id") + cmd.Flags().Uint32Var(&collectionID, "collection", 0, "Collection id") return cmd } diff --git a/cli/schema_patch.go b/cli/schema_patch.go index 70f4283c85..7be683bb9e 100644 --- a/cli/schema_patch.go +++ b/cli/schema_patch.go @@ -11,16 +11,21 @@ package cli import ( + "encoding/json" "fmt" "io" "os" + "strings" + "github.com/lens-vm/lens/host-go/config/model" + "github.com/sourcenetwork/immutable" "github.com/spf13/cobra" ) func MakeSchemaPatchCommand() *cobra.Command { var patchFile string - var setDefault bool + var lensFile string + var setActive bool var cmd = &cobra.Command{ Use: "patch [schema]", Short: "Patch an existing schema type", @@ -29,7 +34,7 @@ func MakeSchemaPatchCommand() *cobra.Command { Uses JSON Patch to modify schema types. 
Example: patch from an argument string: - defradb client schema patch '[{ "op": "add", "path": "...", "value": {...} }]' + defradb client schema patch '[{ "op": "add", "path": "...", "value": {...} }]' '{"lenses": [...' Example: patch from file: defradb client schema patch -f patch.json @@ -55,16 +60,42 @@ To learn more about the DefraDB GraphQL Schema Language, refer to https://docs.s return err } patch = string(data) - case len(args) > 0: + case len(args) >= 1: patch = args[0] default: return fmt.Errorf("patch cannot be empty") } - return store.PatchSchema(cmd.Context(), patch, setDefault) + var lensCfgJson string + switch { + case lensFile != "": + data, err := os.ReadFile(lensFile) + if err != nil { + return err + } + lensCfgJson = string(data) + case len(args) == 2: + lensCfgJson = args[1] + } + + decoder := json.NewDecoder(strings.NewReader(lensCfgJson)) + decoder.DisallowUnknownFields() + + var migration immutable.Option[model.Lens] + if lensCfgJson != "" { + var lensCfg model.Lens + if err := decoder.Decode(&lensCfg); err != nil { + return NewErrInvalidLensConfig(err) + } + migration = immutable.Some(lensCfg) + } + + return store.PatchSchema(cmd.Context(), patch, migration, setActive) }, } - cmd.Flags().BoolVar(&setDefault, "set-default", false, "Set default schema version") - cmd.Flags().StringVarP(&patchFile, "file", "f", "", "File to load a patch from") + cmd.Flags().BoolVar(&setActive, "set-active", false, + "Set the active schema version for all collections using the root schema") + cmd.Flags().StringVarP(&patchFile, "patch-file", "p", "", "File to load a patch from") + cmd.Flags().StringVarP(&lensFile, "lens-file", "t", "", "File to load a lens config from") return cmd } diff --git a/cli/schema_set_default.go b/cli/schema_set_active.go similarity index 60% rename from cli/schema_set_default.go rename to cli/schema_set_active.go index cdb6bd8bd8..4f19ef12aa 100644 --- a/cli/schema_set_default.go +++ b/cli/schema_set_active.go @@ -14,15 +14,16 @@ import (
"github.com/spf13/cobra" ) -func MakeSchemaSetDefaultCommand() *cobra.Command { +func MakeSchemaSetActiveCommand() *cobra.Command { var cmd = &cobra.Command{ - Use: "set-default [versionID]", - Short: "Set the default schema version", - Long: `Set the default schema version`, - Args: cobra.ExactArgs(1), + Use: "set-active [versionID]", + Short: "Set the active collection version", + Long: `Activates all collection versions with the given schema version, and deactivates all +those without it (if they share the same schema root).`, + Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { store := mustGetStoreContext(cmd) - return store.SetDefaultSchemaVersion(cmd.Context(), args[0]) + return store.SetActiveSchemaVersion(cmd.Context(), args[0]) }, } return cmd diff --git a/client/db.go b/client/db.go index 240d2d5dfc..a39c125e16 100644 --- a/client/db.go +++ b/client/db.go @@ -14,6 +14,8 @@ import ( "context" blockstore "github.com/ipfs/boxo/blockstore" + "github.com/lens-vm/lens/host-go/config/model" + "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/events" @@ -98,8 +100,10 @@ type Store interface { AddSchema(context.Context, string) ([]CollectionDescription, error) // PatchSchema takes the given JSON patch string and applies it to the set of SchemaDescriptions - // present in the database. If true is provided, the new schema versions will be made default, otherwise - // [SetDefaultSchemaVersion] should be called to set them so. + // present in the database. + // + // If true is provided, the new schema versions will be made active and previous versions deactivated, otherwise + // [SetDefaultSchemaVersion] should be called to do so. // // It will also update the GQL types used by the query system. It will error and not apply any of the // requested, valid updates should the net result of the patch result in an invalid state. 
The @@ -112,16 +116,18 @@ type Store interface { // // Field [FieldKind] values may be provided in either their raw integer form, or as string as per // [FieldKindStringToEnumMapping]. - PatchSchema(context.Context, string, bool) error + // + // A lens configuration may also be provided, it will be added to all collections using the schema. + PatchSchema(context.Context, string, immutable.Option[model.Lens], bool) error - // SetDefaultSchemaVersion sets the default schema version to the ID provided. It will be applied to all - // collections using the schema. + // SetActiveSchemaVersion activates all collection versions with the given schema version, and deactivates all + // those without it (if they share the same schema root). // // This will affect all operations interacting with the schema where a schema version is not explicitly // provided. This includes GQL queries and Collection operations. // // It will return an error if the provided schema version ID does not exist. - SetDefaultSchemaVersion(context.Context, string) error + SetActiveSchemaVersion(context.Context, string) error // AddView creates a new Defra View. // @@ -151,10 +157,9 @@ type Store interface { // will be returned. This function does not execute the given query. AddView(ctx context.Context, gqlQuery string, sdl string) ([]CollectionDefinition, error) - // SetMigration sets the migration for the given source-destination schema version IDs. Is equivalent to - // calling `LensRegistry().SetMigration(ctx, cfg)`. + // SetMigration sets the migration for all collections using the given source-destination schema version IDs. // - // There may only be one migration per schema version id. If another migration was registered it will be + // There may only be one migration per collection version. If another migration was registered it will be // overwritten by this migration. // // Neither of the schema version IDs specified in the configuration need to exist at the time of calling. 
@@ -185,9 +190,12 @@ type Store interface { // If no matching collections are found an empty set will be returned. GetCollectionsByVersionID(context.Context, string) ([]Collection, error) - // GetAllCollections returns all the collections and their descriptions that currently exist within + // GetAllCollections returns all collections and their descriptions that currently exist within // this [Store]. - GetAllCollections(context.Context) ([]Collection, error) + // + // If `true` is provided, the results will include inactive collections. If `false`, only active collections + // will be returned. + GetAllCollections(context.Context, bool) ([]Collection, error) // GetSchemasByName returns the all schema versions with the given name. GetSchemasByName(context.Context, string) ([]SchemaDescription, error) diff --git a/client/descriptions.go b/client/descriptions.go index 80f08b6a1c..4a1065735b 100644 --- a/client/descriptions.go +++ b/client/descriptions.go @@ -13,12 +13,22 @@ package client import ( "encoding/json" "fmt" + "math" + "github.com/lens-vm/lens/host-go/config/model" "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client/request" ) +// CollectionDescription with no known root will take this ID as their temporary RootID. +// +// Orphan CollectionDescriptions are typically created when setting migrations from schema versions +// that do not yet exist. The OrphanRootID will be replaced with the actual RootID once a full chain +// of schema versions leading back to a schema version used by a collection with a non-orphan RootID +// has been established. +const OrphanRootID uint32 = math.MaxUint32 + // CollectionDescription describes a Collection and all its associated metadata. type CollectionDescription struct { // Name contains the name of the collection. @@ -32,6 +42,14 @@ type CollectionDescription struct { // It is immutable. 
ID uint32 + // RootID is the local root identifier of this collection, linking together a chain of + // collection instances on different schema versions. + // + // Collections sharing the same RootID will be compatible with each other, with the documents + // within them shared and yielded as if they were in the same set, using Lens transforms to + // migrate between schema versions when provided. + RootID uint32 + // The ID of the schema version that this collection is at. SchemaVersionID string @@ -39,6 +57,7 @@ // // Currently supported source types are: // - [QuerySource] + // - [CollectionSource] Sources []any // Indexes contains the secondary indexes that this Collection has. @@ -94,6 +113,10 @@ func (col CollectionDescription) QuerySources() []*QuerySource { return sourcesOfType[*QuerySource](col) } +func (col CollectionDescription) CollectionSources() []*CollectionSource { + return sourcesOfType[*CollectionSource](col) +} + func sourcesOfType[ResultType any](col CollectionDescription) []ResultType { result := []ResultType{} for _, source := range col.Sources { @@ -113,6 +136,28 @@ type QuerySource struct { Query request.Select } +// CollectionSource represents a collection data source from another collection instance. +// +// Data against all collection instances in a CollectionSource chain will be returned as-if +// from the same dataset when queried. Lens transforms may be applied between instances. +// +// Typically these are used to link together multiple schema versions into the same dataset. +type CollectionSource struct { + // SourceCollectionID is the local identifier of the source [CollectionDescription] from which to + // share data. + // + // This is a bi-directional relationship, and documents in the host collection instance will also + // be available to the source collection instance. + SourceCollectionID uint32 + + // Transform is an optional Lens configuration.
If specified, data drawn from the source will have the + // transform applied before being returned by any operation on the host collection instance. + // + // If the transform supports an inverse operation, that inverse will be applied when the source collection + // draws data from this host. + Transform immutable.Option[model.Lens] +} + // SchemaDescription describes a Schema and its associated metadata. type SchemaDescription struct { // Root is the version agnostic identifier for this schema. @@ -336,6 +381,7 @@ type collectionDescription struct { // These properties are unmarshalled using the default json unmarshaller Name immutable.Option[string] ID uint32 + RootID uint32 SchemaVersionID string Indexes []IndexDescription @@ -352,6 +398,7 @@ func (c *CollectionDescription) UnmarshalJSON(bytes []byte) error { c.Name = descMap.Name c.ID = descMap.ID + c.RootID = descMap.RootID c.SchemaVersionID = descMap.SchemaVersionID c.Indexes = descMap.Indexes c.Sources = make([]any, len(descMap.Sources)) @@ -376,6 +423,14 @@ func (c *CollectionDescription) UnmarshalJSON(bytes []byte) error { return err } sourceValue = &querySource + } else if _, ok := source["SourceCollectionID"]; ok { + // This must be a CollectionSource, as only the `CollectionSource` type has a `SourceCollectionID` field + var collectionSource CollectionSource + err := json.Unmarshal(sourceJson, &collectionSource) + if err != nil { + return err + } + sourceValue = &collectionSource } else { return ErrFailedToUnmarshalCollection } diff --git a/client/lens.go b/client/lens.go index 35ef9f1ee3..1a6b423991 100644 --- a/client/lens.go +++ b/client/lens.go @@ -49,49 +49,35 @@ type LensRegistry interface { // after this has been created, the results of those commits will be visible within this scope. WithTxn(datastore.Txn) LensRegistry - // SetMigration sets the migration for the given source-destination schema version IDs. Is equivalent to - // calling `Store.SetMigration(ctx, cfg)`. 
+ // SetMigration caches the migration for the given collection ID. It does not persist the migration in long + // term storage, for that one should call [Store.SetMigration(ctx, cfg)]. // - // There may only be one migration per schema version id. If another migration was registered it will be + // There may only be one migration per collection. If another migration was registered it will be // overwritten by this migration. // - // Neither of the schema version IDs specified in the configuration need to exist at the time of calling. - // This is to allow the migration of documents of schema versions unknown to the local node received by the - // P2P system. - // // Migrations will only run if there is a complete path from the document schema version to the latest local // schema version. - SetMigration(context.Context, LensConfig) error + SetMigration(context.Context, uint32, model.Lens) error // ReloadLenses clears any cached migrations, loads their configurations from the database and re-initializes // them. It is run on database start if the database already existed. ReloadLenses(context.Context) error // MigrateUp returns an enumerable that feeds the given source through the Lens migration for the given - // schema version id if one is found, if there is no matching migration the given source will be returned. + // collection id if one is found, if there is no matching migration the given source will be returned. MigrateUp( context.Context, enumerable.Enumerable[map[string]any], - string, + uint32, ) (enumerable.Enumerable[map[string]any], error) - // MigrateDown returns an enumerable that feeds the given source through the Lens migration for the schema - // version that precedes the given schema version id in reverse, if one is found, if there is no matching - // migration the given source will be returned. 
+ // MigrateDown returns an enumerable that feeds the given source through the Lens migration for the given + // collection id in reverse if one is found, if there is no matching migration the given source will be returned. // // This downgrades any documents in the source enumerable if/when enumerated. MigrateDown( context.Context, enumerable.Enumerable[map[string]any], - string, + uint32, ) (enumerable.Enumerable[map[string]any], error) - - // Config returns a slice of the configurations of the currently loaded migrations. - // - // Modifying the slice does not affect the loaded configurations. - Config(context.Context) ([]LensConfig, error) - - // HasMigration returns true if there is a migration registered for the given schema version id, otherwise - // will return false. - HasMigration(context.Context, string) (bool, error) } diff --git a/client/mocks/collection.go b/client/mocks/collection.go index b1fac9c243..6e6c7afae3 100644 --- a/client/mocks/collection.go +++ b/client/mocks/collection.go @@ -9,6 +9,8 @@ import ( datastore "github.com/sourcenetwork/defradb/datastore" + immutable "github.com/sourcenetwork/immutable" + mock "github.com/stretchr/testify/mock" ) @@ -821,14 +823,14 @@ func (_c *Collection_ID_Call) RunAndReturn(run func() uint32) *Collection_ID_Cal } // Name provides a mock function with given fields: -func (_m *Collection) Name() string { +func (_m *Collection) Name() immutable.Option[string] { ret := _m.Called() - var r0 string - if rf, ok := ret.Get(0).(func() string); ok { + var r0 immutable.Option[string] + if rf, ok := ret.Get(0).(func() immutable.Option[string]); ok { r0 = rf() } else { - r0 = ret.Get(0).(string) + r0 = ret.Get(0).(immutable.Option[string]) } return r0 @@ -851,12 +853,12 @@ func (_c *Collection_Name_Call) Run(run func()) *Collection_Name_Call { return _c } -func (_c *Collection_Name_Call) Return(_a0 string) *Collection_Name_Call { +func (_c *Collection_Name_Call) Return(_a0 immutable.Option[string]) *Collection_Name_Call 
{ _c.Call.Return(_a0) return _c } -func (_c *Collection_Name_Call) RunAndReturn(run func() string) *Collection_Name_Call { +func (_c *Collection_Name_Call) RunAndReturn(run func() immutable.Option[string]) *Collection_Name_Call { _c.Call.Return(run) return _c } diff --git a/client/mocks/db.go b/client/mocks/db.go index 90dc8986d0..158ad6d449 100644 --- a/client/mocks/db.go +++ b/client/mocks/db.go @@ -12,7 +12,11 @@ import ( events "github.com/sourcenetwork/defradb/events" + immutable "github.com/sourcenetwork/immutable" + mock "github.com/stretchr/testify/mock" + + model "github.com/lens-vm/lens/host-go/config/model" ) // DB is an autogenerated mock type for the DB type @@ -386,25 +390,25 @@ func (_c *DB_ExecRequest_Call) RunAndReturn(run func(context.Context, string) *c return _c } -// GetAllCollections provides a mock function with given fields: _a0 -func (_m *DB) GetAllCollections(_a0 context.Context) ([]client.Collection, error) { - ret := _m.Called(_a0) +// GetAllCollections provides a mock function with given fields: _a0, _a1 +func (_m *DB) GetAllCollections(_a0 context.Context, _a1 bool) ([]client.Collection, error) { + ret := _m.Called(_a0, _a1) var r0 []client.Collection var r1 error - if rf, ok := ret.Get(0).(func(context.Context) ([]client.Collection, error)); ok { - return rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, bool) ([]client.Collection, error)); ok { + return rf(_a0, _a1) } - if rf, ok := ret.Get(0).(func(context.Context) []client.Collection); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, bool) []client.Collection); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]client.Collection) } } - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(_a0) + if rf, ok := ret.Get(1).(func(context.Context, bool) error); ok { + r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) } @@ -419,13 +423,14 @@ type DB_GetAllCollections_Call struct { // GetAllCollections is a helper method to 
define mock.On call // - _a0 context.Context -func (_e *DB_Expecter) GetAllCollections(_a0 interface{}) *DB_GetAllCollections_Call { - return &DB_GetAllCollections_Call{Call: _e.mock.On("GetAllCollections", _a0)} +// - _a1 bool +func (_e *DB_Expecter) GetAllCollections(_a0 interface{}, _a1 interface{}) *DB_GetAllCollections_Call { + return &DB_GetAllCollections_Call{Call: _e.mock.On("GetAllCollections", _a0, _a1)} } -func (_c *DB_GetAllCollections_Call) Run(run func(_a0 context.Context)) *DB_GetAllCollections_Call { +func (_c *DB_GetAllCollections_Call) Run(run func(_a0 context.Context, _a1 bool)) *DB_GetAllCollections_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) + run(args[0].(context.Context), args[1].(bool)) }) return _c } @@ -435,7 +440,7 @@ func (_c *DB_GetAllCollections_Call) Return(_a0 []client.Collection, _a1 error) return _c } -func (_c *DB_GetAllCollections_Call) RunAndReturn(run func(context.Context) ([]client.Collection, error)) *DB_GetAllCollections_Call { +func (_c *DB_GetAllCollections_Call) RunAndReturn(run func(context.Context, bool) ([]client.Collection, error)) *DB_GetAllCollections_Call { _c.Call.Return(run) return _c } @@ -1070,13 +1075,13 @@ func (_c *DB_NewTxn_Call) RunAndReturn(run func(context.Context, bool) (datastor return _c } -// PatchSchema provides a mock function with given fields: _a0, _a1, _a2 -func (_m *DB) PatchSchema(_a0 context.Context, _a1 string, _a2 bool) error { - ret := _m.Called(_a0, _a1, _a2) +// PatchSchema provides a mock function with given fields: _a0, _a1, _a2, _a3 +func (_m *DB) PatchSchema(_a0 context.Context, _a1 string, _a2 immutable.Option[model.Lens], _a3 bool) error { + ret := _m.Called(_a0, _a1, _a2, _a3) var r0 error - if rf, ok := ret.Get(0).(func(context.Context, string, bool) error); ok { - r0 = rf(_a0, _a1, _a2) + if rf, ok := ret.Get(0).(func(context.Context, string, immutable.Option[model.Lens], bool) error); ok { + r0 = rf(_a0, _a1, _a2, _a3) } else { r0 = 
ret.Error(0) } @@ -1092,14 +1097,15 @@ type DB_PatchSchema_Call struct { // PatchSchema is a helper method to define mock.On call // - _a0 context.Context // - _a1 string -// - _a2 bool -func (_e *DB_Expecter) PatchSchema(_a0 interface{}, _a1 interface{}, _a2 interface{}) *DB_PatchSchema_Call { - return &DB_PatchSchema_Call{Call: _e.mock.On("PatchSchema", _a0, _a1, _a2)} +// - _a2 immutable.Option[model.Lens] +// - _a3 bool +func (_e *DB_Expecter) PatchSchema(_a0 interface{}, _a1 interface{}, _a2 interface{}, _a3 interface{}) *DB_PatchSchema_Call { + return &DB_PatchSchema_Call{Call: _e.mock.On("PatchSchema", _a0, _a1, _a2, _a3)} } -func (_c *DB_PatchSchema_Call) Run(run func(_a0 context.Context, _a1 string, _a2 bool)) *DB_PatchSchema_Call { +func (_c *DB_PatchSchema_Call) Run(run func(_a0 context.Context, _a1 string, _a2 immutable.Option[model.Lens], _a3 bool)) *DB_PatchSchema_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(string), args[2].(bool)) + run(args[0].(context.Context), args[1].(string), args[2].(immutable.Option[model.Lens]), args[3].(bool)) }) return _c } @@ -1109,7 +1115,7 @@ func (_c *DB_PatchSchema_Call) Return(_a0 error) *DB_PatchSchema_Call { return _c } -func (_c *DB_PatchSchema_Call) RunAndReturn(run func(context.Context, string, bool) error) *DB_PatchSchema_Call { +func (_c *DB_PatchSchema_Call) RunAndReturn(run func(context.Context, string, immutable.Option[model.Lens], bool) error) *DB_PatchSchema_Call { _c.Call.Return(run) return _c } @@ -1242,8 +1248,8 @@ func (_c *DB_Root_Call) RunAndReturn(run func() datastore.RootStore) *DB_Root_Ca return _c } -// SetDefaultSchemaVersion provides a mock function with given fields: _a0, _a1 -func (_m *DB) SetDefaultSchemaVersion(_a0 context.Context, _a1 string) error { +// SetActiveSchemaVersion provides a mock function with given fields: _a0, _a1 +func (_m *DB) SetActiveSchemaVersion(_a0 context.Context, _a1 string) error { ret := _m.Called(_a0, _a1) var r0 error 
@@ -1256,31 +1262,31 @@ func (_m *DB) SetDefaultSchemaVersion(_a0 context.Context, _a1 string) error { return r0 } -// DB_SetDefaultSchemaVersion_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetDefaultSchemaVersion' -type DB_SetDefaultSchemaVersion_Call struct { +// DB_SetActiveSchemaVersion_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetActiveSchemaVersion' +type DB_SetActiveSchemaVersion_Call struct { *mock.Call } -// SetDefaultSchemaVersion is a helper method to define mock.On call +// SetActiveSchemaVersion is a helper method to define mock.On call // - _a0 context.Context // - _a1 string -func (_e *DB_Expecter) SetDefaultSchemaVersion(_a0 interface{}, _a1 interface{}) *DB_SetDefaultSchemaVersion_Call { - return &DB_SetDefaultSchemaVersion_Call{Call: _e.mock.On("SetDefaultSchemaVersion", _a0, _a1)} +func (_e *DB_Expecter) SetActiveSchemaVersion(_a0 interface{}, _a1 interface{}) *DB_SetActiveSchemaVersion_Call { + return &DB_SetActiveSchemaVersion_Call{Call: _e.mock.On("SetActiveSchemaVersion", _a0, _a1)} } -func (_c *DB_SetDefaultSchemaVersion_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_SetDefaultSchemaVersion_Call { +func (_c *DB_SetActiveSchemaVersion_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_SetActiveSchemaVersion_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(context.Context), args[1].(string)) }) return _c } -func (_c *DB_SetDefaultSchemaVersion_Call) Return(_a0 error) *DB_SetDefaultSchemaVersion_Call { +func (_c *DB_SetActiveSchemaVersion_Call) Return(_a0 error) *DB_SetActiveSchemaVersion_Call { _c.Call.Return(_a0) return _c } -func (_c *DB_SetDefaultSchemaVersion_Call) RunAndReturn(run func(context.Context, string) error) *DB_SetDefaultSchemaVersion_Call { +func (_c *DB_SetActiveSchemaVersion_Call) RunAndReturn(run func(context.Context, string) error) *DB_SetActiveSchemaVersion_Call { _c.Call.Return(run) return _c } diff 
--git a/core/data_test.go b/core/data_test.go index ccad8163a7..1ba5a71611 100644 --- a/core/data_test.go +++ b/core/data_test.go @@ -25,8 +25,8 @@ func TestMergeAscending_ReturnsEmpty_GivenEmpty(t *testing.T) { } func TestMergeAscending_ReturnsSingle_GivenSingle(t *testing.T) { - start1 := "/p/0/0/k1" - end1 := "/p/0/0/k2" + start1 := "/1/p/0/k1" + end1 := "/1/p/0/k2" input := []Span{NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1))} result := MergeAscending(input) @@ -37,10 +37,10 @@ func TestMergeAscending_ReturnsSingle_GivenSingle(t *testing.T) { } func TestMergeAscending_ReturnsSecondBeforeFirst_GivenKeysInReverseOrder(t *testing.T) { - start1 := "/p/0/0/k4" - end1 := "/p/0/0/k5" - start2 := "/p/0/0/k1" - end2 := "/p/0/0/k2" + start1 := "/1/p/0/k4" + end1 := "/1/p/0/k5" + start2 := "/1/p/0/k1" + end2 := "/1/p/0/k2" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), @@ -57,12 +57,12 @@ func TestMergeAscending_ReturnsSecondBeforeFirst_GivenKeysInReverseOrder(t *test } func TestMergeAscending_ReturnsItemsInOrder_GivenKeysInMixedOrder(t *testing.T) { - start1 := "/p/0/0/k1" - end1 := "/p/0/0/k2" - start2 := "/p/0/0/k7" - end2 := "/p/0/0/k8" - start3 := "/p/0/0/k4" - end3 := "/p/0/0/k5" + start1 := "/1/p/0/k1" + end1 := "/1/p/0/k2" + start2 := "/1/p/0/k7" + end2 := "/1/p/0/k8" + start3 := "/1/p/0/k4" + end3 := "/1/p/0/k5" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), @@ -83,10 +83,10 @@ func TestMergeAscending_ReturnsItemsInOrder_GivenKeysInMixedOrder(t *testing.T) } func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndEqualToStart(t *testing.T) { - start1 := "/p/0/0/k3" - end1 := "/p/0/0/k4" - start2 := "/p/0/0/k1" - end2 := "/p/0/0/k3" + start1 := "/1/p/0/k3" + end1 := "/1/p/0/k4" + start2 := "/1/p/0/k1" + end2 := "/1/p/0/k3" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ 
-100,10 +100,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndEqualToStart(t *testing } func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndAdjacentToStart(t *testing.T) { - start1 := "/p/0/0/k3" - end1 := "/p/0/0/k4" - start2 := "/p/0/0/k1" - end2 := "/p/0/0/k2" + start1 := "/1/p/0/k3" + end1 := "/1/p/0/k4" + start2 := "/1/p/0/k1" + end2 := "/1/p/0/k2" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -117,10 +117,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndAdjacentToStart(t *test } func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndWithin(t *testing.T) { - start1 := "/p/0/0/k3" - end1 := "/p/0/0/k4" - start2 := "/p/0/0/k1" - end2 := "/p/0/0/k3.5" + start1 := "/1/p/0/k3" + end1 := "/1/p/0/k4" + start2 := "/1/p/0/k1" + end2 := "/1/p/0/k3.5" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -134,10 +134,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndWithin(t *testing.T) { } func TestMergeAscending_ReturnsSingle_GivenStartPrefixesEndWithin(t *testing.T) { - start1 := "/p/0/0/k1.1" - end1 := "/p/0/0/k3" - start2 := "/p/0/0/k1" - end2 := "/p/0/0/k2.5" + start1 := "/1/p/0/k1.1" + end1 := "/1/p/0/k3" + start2 := "/1/p/0/k1" + end2 := "/1/p/0/k2.5" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -151,10 +151,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartPrefixesEndWithin(t *testing.T) } func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndWithinEndPrefix(t *testing.T) { - start1 := "/p/0/0/k3" - end1 := "/p/0/0/k4" - start2 := "/p/0/0/k1" - end2 := "/p/0/0/k4.5" + start1 := "/1/p/0/k3" + end1 := "/1/p/0/k4" + start2 := "/1/p/0/k1" + end2 := "/1/p/0/k4.5" input := []Span{ 
NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -168,10 +168,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndWithinEndPrefix(t *test } func TestMergeAscending_ReturnsSingle_GivenStartPrefixesEndWithinEndPrefix(t *testing.T) { - start1 := "/p/0/0/k1.1" - end1 := "/p/0/0/k3" - start2 := "/p/0/0/k1" - end2 := "/p/0/0/k3.5" + start1 := "/1/p/0/k1.1" + end1 := "/1/p/0/k3" + start2 := "/1/p/0/k1" + end2 := "/1/p/0/k3.5" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -185,10 +185,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartPrefixesEndWithinEndPrefix(t *te } func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndEqual(t *testing.T) { - start1 := "/p/0/0/k3" - end1 := "/p/0/0/k4" - start2 := "/p/0/0/k1" - end2 := "/p/0/0/k4" + start1 := "/1/p/0/k3" + end1 := "/1/p/0/k4" + start2 := "/1/p/0/k1" + end2 := "/1/p/0/k4" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -202,10 +202,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndEqual(t *testing.T) { } func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndAdjacentAndBefore(t *testing.T) { - start1 := "/p/0/0/k3" - end1 := "/p/0/0/k5" - start2 := "/p/0/0/k1" - end2 := "/p/0/0/k4" + start1 := "/1/p/0/k3" + end1 := "/1/p/0/k5" + start2 := "/1/p/0/k1" + end2 := "/1/p/0/k4" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -219,10 +219,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndAdjacentAndBefore(t *te } func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndAdjacentAndGreater(t *testing.T) { - start1 := "/p/0/0/k3" - end1 := "/p/0/0/k4" - start2 := "/p/0/0/k1" - end2 := "/p/0/0/k5" + 
start1 := "/1/p/0/k3" + end1 := "/1/p/0/k4" + start2 := "/1/p/0/k1" + end2 := "/1/p/0/k5" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -236,10 +236,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartBeforeEndAdjacentAndGreater(t *t } func TestMergeAscending_ReturnsSingle_GivenStartPrefixesEndEqual(t *testing.T) { - start1 := "/p/0/0/k1.1" - end1 := "/p/0/0/k3" - start2 := "/p/0/0/k1" - end2 := "/p/0/0/k3" + start1 := "/1/p/0/k1.1" + end1 := "/1/p/0/k3" + start2 := "/1/p/0/k1" + end2 := "/1/p/0/k3" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -253,10 +253,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartPrefixesEndEqual(t *testing.T) { } func TestMergeAscending_ReturnsSingle_GivenStartPrefixesEndAdjacentAndBefore(t *testing.T) { - start1 := "/p/0/0/k1.1" - end1 := "/p/0/0/k3" - start2 := "/p/0/0/k1" - end2 := "/p/0/0/k2" + start1 := "/1/p/0/k1.1" + end1 := "/1/p/0/k3" + start2 := "/1/p/0/k1" + end2 := "/1/p/0/k2" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -270,10 +270,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartPrefixesEndAdjacentAndBefore(t * } func TestMergeAscending_ReturnsSingle_GivenStartPrefixesEndAdjacentAndAfter(t *testing.T) { - start1 := "/p/0/0/k1.1" - end1 := "/p/0/0/k3" - start2 := "/p/0/0/k1" - end2 := "/p/0/0/k4" + start1 := "/1/p/0/k1.1" + end1 := "/1/p/0/k3" + start2 := "/1/p/0/k1" + end2 := "/1/p/0/k4" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -287,16 +287,16 @@ func TestMergeAscending_ReturnsSingle_GivenStartPrefixesEndAdjacentAndAfter(t *t } func 
TestMergeAscending_ReturnsMiddleSpansMerged_GivenSpanCoveringMiddleSpans(t *testing.T) { - start1 := "/p/0/0/k1" - end1 := "/p/0/0/k2" - start2 := "/p/0/0/k6" - end2 := "/p/0/0/k7" - start3 := "/p/0/0/k9" - end3 := "/p/0/0/ka" - start4 := "/p/0/0/kc" - end4 := "/p/0/0/kd" - start5 := "/p/0/0/k4" - end5 := "/p/0/0/ka" + start1 := "/1/p/0/k1" + end1 := "/1/p/0/k2" + start2 := "/1/p/0/k6" + end2 := "/1/p/0/k7" + start3 := "/1/p/0/k9" + end3 := "/1/p/0/ka" + start4 := "/1/p/0/kc" + end4 := "/1/p/0/kd" + start5 := "/1/p/0/k4" + end5 := "/1/p/0/ka" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -318,10 +318,10 @@ func TestMergeAscending_ReturnsMiddleSpansMerged_GivenSpanCoveringMiddleSpans(t } func TestMergeAscending_ReturnsSingle_GivenStartEqualEndWithin(t *testing.T) { - start1 := "/p/0/0/k1" - end1 := "/p/0/0/k2" - start2 := "/p/0/0/k1" - end2 := "/p/0/0/k1.5" + start1 := "/1/p/0/k1" + end1 := "/1/p/0/k2" + start2 := "/1/p/0/k1" + end2 := "/1/p/0/k1.5" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -335,10 +335,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartEqualEndWithin(t *testing.T) { } func TestMergeAscending_ReturnsSingle_GivenStartEqualEndWithinEndPrefix(t *testing.T) { - start1 := "/p/0/0/k1" - end1 := "/p/0/0/k2" - start2 := "/p/0/0/k1" - end2 := "/p/0/0/k2.5" + start1 := "/1/p/0/k1" + end1 := "/1/p/0/k2" + start2 := "/1/p/0/k1" + end2 := "/1/p/0/k2.5" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -352,8 +352,8 @@ func TestMergeAscending_ReturnsSingle_GivenStartEqualEndWithinEndPrefix(t *testi } func TestMergeAscending_ReturnsSingle_GivenDuplicates(t *testing.T) { - start1 := "/p/0/0/k1" - end1 := "/p/0/0/k2" + start1 := "/1/p/0/k1" + end1 := 
"/1/p/0/k2" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), @@ -367,10 +367,10 @@ func TestMergeAscending_ReturnsSingle_GivenDuplicates(t *testing.T) { } func TestMergeAscending_ReturnsSingle_GivenStartWithinEndWithin(t *testing.T) { - start1 := "/p/0/0/k1" - end1 := "/p/0/0/k2" - start2 := "/p/0/0/k1.2" - end2 := "/p/0/0/k1.5" + start1 := "/1/p/0/k1" + end1 := "/1/p/0/k2" + start2 := "/1/p/0/k1.2" + end2 := "/1/p/0/k1.5" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -384,10 +384,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartWithinEndWithin(t *testing.T) { } func TestMergeAscending_ReturnsSingle_GivenStartWithinEndWithinEndPrefix(t *testing.T) { - start1 := "/p/0/0/k1" - end1 := "/p/0/0/k2" - start2 := "/p/0/0/k1.2" - end2 := "/p/0/0/k2.5" + start1 := "/1/p/0/k1" + end1 := "/1/p/0/k2" + start2 := "/1/p/0/k1.2" + end2 := "/1/p/0/k2.5" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -401,10 +401,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartWithinEndWithinEndPrefix(t *test } func TestMergeAscending_ReturnsSingle_GivenStartWithinEndEqual(t *testing.T) { - start1 := "/p/0/0/k1" - end1 := "/p/0/0/k2" - start2 := "/p/0/0/k1.2" - end2 := "/p/0/0/k2" + start1 := "/1/p/0/k1" + end1 := "/1/p/0/k2" + start2 := "/1/p/0/k1.2" + end2 := "/1/p/0/k2" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -418,10 +418,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartWithinEndEqual(t *testing.T) { } func TestMergeAscending_ReturnsSingle_GivenStartWithinEndAdjacentAndBefore(t *testing.T) { - start1 := "/p/0/0/k1" - end1 := "/p/0/0/k3" - start2 := "/p/0/0/k1.2" - end2 := 
"/p/0/0/k2" + start1 := "/1/p/0/k1" + end1 := "/1/p/0/k3" + start2 := "/1/p/0/k1.2" + end2 := "/1/p/0/k2" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -435,10 +435,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartWithinEndAdjacentAndBefore(t *te } func TestMergeAscending_ReturnsSingle_GivenStartWithinEndAdjacentAndAfter(t *testing.T) { - start1 := "/p/0/0/k1" - end1 := "/p/0/0/k3" - start2 := "/p/0/0/k1.2" - end2 := "/p/0/0/k4" + start1 := "/1/p/0/k1" + end1 := "/1/p/0/k3" + start2 := "/1/p/0/k1.2" + end2 := "/1/p/0/k4" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -454,16 +454,16 @@ func TestMergeAscending_ReturnsSingle_GivenStartWithinEndAdjacentAndAfter(t *tes func TestMergeAscending_ReturnsMiddleSpansMerged_GivenStartEqualEndAfterSpanCoveringMiddleSpans( t *testing.T, ) { - start1 := "/p/0/0/k1" - end1 := "/p/0/0/k2" - start2 := "/p/0/0/k4" - end2 := "/p/0/0/k5" - start3 := "/p/0/0/k7" - end3 := "/p/0/0/k8" - start4 := "/p/0/0/kc" - end4 := "/p/0/0/kd" - start5 := "/p/0/0/k4" // equal to start2 - end5 := "/p/0/0/ka" + start1 := "/1/p/0/k1" + end1 := "/1/p/0/k2" + start2 := "/1/p/0/k4" + end2 := "/1/p/0/k5" + start3 := "/1/p/0/k7" + end3 := "/1/p/0/k8" + start4 := "/1/p/0/kc" + end4 := "/1/p/0/kd" + start5 := "/1/p/0/k4" // equal to start2 + end5 := "/1/p/0/ka" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -487,16 +487,16 @@ func TestMergeAscending_ReturnsMiddleSpansMerged_GivenStartEqualEndAfterSpanCove func TestMergeAscending_ReturnsMiddleSpansMerged_GivenStartWithinEndAfterSpanCoveringMiddleSpans( t *testing.T, ) { - start1 := "/p/0/0/k1" - end1 := "/p/0/0/k2" - start2 := "/p/0/0/k4" - end2 := "/p/0/0/k5" - start3 := "/p/0/0/k7" - end3 := 
"/p/0/0/k8" - start4 := "/p/0/0/kc" - end4 := "/p/0/0/kd" - start5 := "/p/0/0/k4.5" // within span2 - end5 := "/p/0/0/ka" + start1 := "/1/p/0/k1" + end1 := "/1/p/0/k2" + start2 := "/1/p/0/k4" + end2 := "/1/p/0/k5" + start3 := "/1/p/0/k7" + end3 := "/1/p/0/k8" + start4 := "/1/p/0/kc" + end4 := "/1/p/0/kd" + start5 := "/1/p/0/k4.5" // within span2 + end5 := "/1/p/0/ka" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -519,16 +519,16 @@ func TestMergeAscending_ReturnsMiddleSpansMerged_GivenStartWithinEndAfterSpanCov func TestMergeAscending_ReturnsMiddleSpansMerged_GivenStartEqualToEndEndAfterSpanCoveringMiddleSpans( t *testing.T, ) { - start1 := "/p/0/0/k1" - end1 := "/p/0/0/k2" - start2 := "/p/0/0/k4" - end2 := "/p/0/0/k5" - start3 := "/p/0/0/k7" - end3 := "/p/0/0/k8" - start4 := "/p/0/0/kc" - end4 := "/p/0/0/kd" - start5 := "/p/0/0/k5" // span2's end - end5 := "/p/0/0/ka" + start1 := "/1/p/0/k1" + end1 := "/1/p/0/k2" + start2 := "/1/p/0/k4" + end2 := "/1/p/0/k5" + start3 := "/1/p/0/k7" + end3 := "/1/p/0/k8" + start4 := "/1/p/0/kc" + end4 := "/1/p/0/kd" + start5 := "/1/p/0/k5" // span2's end + end5 := "/1/p/0/ka" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -551,16 +551,16 @@ func TestMergeAscending_ReturnsMiddleSpansMerged_GivenStartEqualToEndEndAfterSpa func TestMergeAscending_ReturnsMiddleSpansMerged_GivenStartAdjacentAndBeforeEndEndAfterSpanCoveringMiddleSpans( t *testing.T, ) { - start1 := "/p/0/0/k1" - end1 := "/p/0/0/k2" - start2 := "/p/0/0/k4" - end2 := "/p/0/0/k6" - start3 := "/p/0/0/k8" - end3 := "/p/0/0/k9" - start4 := "/p/0/0/kd" - end4 := "/p/0/0/ke" - start5 := "/p/0/0/k5" // adjacent but before span2's end - end5 := "/p/0/0/kb" + start1 := "/1/p/0/k1" + end1 := "/1/p/0/k2" + start2 := "/1/p/0/k4" + end2 := "/1/p/0/k6" + start3 := "/1/p/0/k8" + end3 
:= "/1/p/0/k9" + start4 := "/1/p/0/kd" + end4 := "/1/p/0/ke" + start5 := "/1/p/0/k5" // adjacent but before span2's end + end5 := "/1/p/0/kb" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -583,16 +583,16 @@ func TestMergeAscending_ReturnsMiddleSpansMerged_GivenStartAdjacentAndBeforeEndE func TestMergeAscending_ReturnsMiddleSpansMerged_GivenStartAdjacentAndAfterEndEndAfterSpanCoveringMiddleSpans( t *testing.T, ) { - start1 := "/p/0/0/k1" - end1 := "/p/0/0/k2" - start2 := "/p/0/0/k4" - end2 := "/p/0/0/k5" - start3 := "/p/0/0/k8" - end3 := "/p/0/0/k9" - start4 := "/p/0/0/kd" - end4 := "/p/0/0/ke" - start5 := "/p/0/0/k6" // adjacent and after span2's end - end5 := "/p/0/0/kb" + start1 := "/1/p/0/k1" + end1 := "/1/p/0/k2" + start2 := "/1/p/0/k4" + end2 := "/1/p/0/k5" + start3 := "/1/p/0/k8" + end3 := "/1/p/0/k9" + start4 := "/1/p/0/kd" + end4 := "/1/p/0/ke" + start5 := "/1/p/0/k6" // adjacent and after span2's end + end5 := "/1/p/0/kb" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -613,10 +613,10 @@ func TestMergeAscending_ReturnsMiddleSpansMerged_GivenStartAdjacentAndAfterEndEn } func TestMergeAscending_ReturnsTwoItems_GivenSecondItemAfterFirst(t *testing.T) { - start1 := "/p/0/0/k1" - end1 := "/p/0/0/k2" - start2 := "/p/0/0/k4" - end2 := "/p/0/0/k5" + start1 := "/1/p/0/k1" + end1 := "/1/p/0/k2" + start2 := "/1/p/0/k4" + end2 := "/1/p/0/k5" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -632,10 +632,10 @@ func TestMergeAscending_ReturnsTwoItems_GivenSecondItemAfterFirst(t *testing.T) } func TestMergeAscending_ReturnsSingle_GivenStartAdjacentAndBeforeEndEndEqual(t *testing.T) { - start1 := "/p/0/0/k3" - end1 := "/p/0/0/k6" - start2 := "/p/0/0/k5" - end2 := 
"/p/0/0/k6" + start1 := "/1/p/0/k3" + end1 := "/1/p/0/k6" + start2 := "/1/p/0/k5" + end2 := "/1/p/0/k6" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -651,10 +651,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartAdjacentAndBeforeEndEndEqual(t * func TestMergeAscending_ReturnsSingle_GivenStartAdjacentAndBeforeEndEndAdjacentAndAfter( t *testing.T, ) { - start1 := "/p/0/0/k3" - end1 := "/p/0/0/k6" - start2 := "/p/0/0/k5" - end2 := "/p/0/0/k7" + start1 := "/1/p/0/k3" + end1 := "/1/p/0/k6" + start2 := "/1/p/0/k5" + end2 := "/1/p/0/k7" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -668,10 +668,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartAdjacentAndBeforeEndEndAdjacentA } func TestMergeAscending_ReturnsSingle_GivenStartAdjacentAndBeforeEndEndAfter(t *testing.T) { - start1 := "/p/0/0/k3" - end1 := "/p/0/0/k6" - start2 := "/p/0/0/k5" - end2 := "/p/0/0/k8" + start1 := "/1/p/0/k3" + end1 := "/1/p/0/k6" + start2 := "/1/p/0/k5" + end2 := "/1/p/0/k8" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), @@ -685,10 +685,10 @@ func TestMergeAscending_ReturnsSingle_GivenStartAdjacentAndBeforeEndEndAfter(t * } func TestMergeAscending_ReturnsSingle_GivenStartAdjacentAndAfterEndEndAfter(t *testing.T) { - start1 := "/p/0/0/k3" - end1 := "/p/0/0/k6" - start2 := "/p/0/0/k7" - end2 := "/p/0/0/k8" + start1 := "/1/p/0/k3" + end1 := "/1/p/0/k6" + start2 := "/1/p/0/k7" + end2 := "/1/p/0/k8" input := []Span{ NewSpan(MustNewDataStoreKey(start1), MustNewDataStoreKey(end1)), NewSpan(MustNewDataStoreKey(start2), MustNewDataStoreKey(end2)), diff --git a/core/key.go b/core/key.go index cb67cc45d6..a4317d43d0 100644 --- a/core/key.go +++ b/core/key.go @@ -47,7 +47,6 @@ const ( 
COLLECTION_NAME = "/collection/name" COLLECTION_SCHEMA_VERSION = "/collection/version" COLLECTION_INDEX = "/collection/index" - SCHEMA_MIGRATION = "/schema/migration" SCHEMA_VERSION = "/schema/version/v" SCHEMA_VERSION_HISTORY = "/schema/version/h" SEQ = "/seq" @@ -66,10 +65,10 @@ type Key interface { // DataStoreKey is a type that represents a key in the database. type DataStoreKey struct { - CollectionID string - InstanceType InstanceType - DocID string - FieldId string + CollectionRootID uint32 + InstanceType InstanceType + DocID string + FieldId string } var _ Key = (*DataStoreKey)(nil) @@ -87,8 +86,8 @@ type IndexDataStoreKey struct { var _ Key = (*IndexDataStoreKey)(nil) type PrimaryDataStoreKey struct { - CollectionId string - DocID string + CollectionRootID uint32 + DocID string } var _ Key = (*PrimaryDataStoreKey)(nil) @@ -150,26 +149,18 @@ type SchemaVersionKey struct { var _ Key = (*SchemaVersionKey)(nil) -// SchemaHistoryKey holds the pathway through the schema version history for +// SchemaRootKey holds the pathway through the schema version history for // any given schema. // // The key points to the schema version id of the next version of the schema. // If a SchemaHistoryKey does not exist for a given SchemaVersionID it means // that that SchemaVersionID is for the latest version. -type SchemaHistoryKey struct { - SchemaRoot string - PreviousSchemaVersionID string -} - -var _ Key = (*SchemaHistoryKey)(nil) - -// SchemaVersionMigrationKey points to the jsonified configuration of a lens migration -// for the given source schema version id. -type SchemaVersionMigrationKey struct { - SourceSchemaVersionID string +type SchemaRootKey struct { + SchemaRoot string + SchemaVersionID string } -var _ Key = (*SchemaVersionMigrationKey)(nil) +var _ Key = (*SchemaRootKey)(nil) type P2PCollectionKey struct { CollectionID string @@ -193,7 +184,7 @@ var _ Key = (*ReplicatorKey)(nil) // splitting the input using '/' as a field deliminator. 
It assumes // that the input string is in the following format: // -// /[CollectionId]/[InstanceType]/[DocID]/[FieldId] +// /[CollectionRootId]/[InstanceType]/[DocID]/[FieldId] // // Any properties before the above (assuming a '/' deliminator) are ignored func NewDataStoreKey(key string) (DataStoreKey, error) { @@ -211,7 +202,12 @@ func NewDataStoreKey(key string) (DataStoreKey, error) { return dataStoreKey, ErrInvalidKey } - dataStoreKey.CollectionID = elements[0] + colRootID, err := strconv.Atoi(elements[0]) + if err != nil { + return DataStoreKey{}, err + } + + dataStoreKey.CollectionRootID = uint32(colRootID) dataStoreKey.InstanceType = InstanceType(elements[1]) dataStoreKey.DocID = elements[2] if numberOfElements == 4 { @@ -351,27 +347,23 @@ func NewSchemaVersionKey(schemaVersionID string) SchemaVersionKey { return SchemaVersionKey{SchemaVersionID: schemaVersionID} } -func NewSchemaHistoryKey(schemaRoot string, previousSchemaVersionID string) SchemaHistoryKey { - return SchemaHistoryKey{ - SchemaRoot: schemaRoot, - PreviousSchemaVersionID: previousSchemaVersionID, +func NewSchemaRootKey(schemaRoot string, schemaVersionID string) SchemaRootKey { + return SchemaRootKey{ + SchemaRoot: schemaRoot, + SchemaVersionID: schemaVersionID, } } -func NewSchemaVersionMigrationKey(schemaVersionID string) SchemaVersionMigrationKey { - return SchemaVersionMigrationKey{SourceSchemaVersionID: schemaVersionID} -} - -func NewSchemaHistoryKeyFromString(keyString string) (SchemaHistoryKey, error) { +func NewSchemaRootKeyFromString(keyString string) (SchemaRootKey, error) { keyString = strings.TrimPrefix(keyString, SCHEMA_VERSION_HISTORY+"/") elements := strings.Split(keyString, "/") if len(elements) != 2 { - return SchemaHistoryKey{}, ErrInvalidKey + return SchemaRootKey{}, ErrInvalidKey } - return SchemaHistoryKey{ - SchemaRoot: elements[0], - PreviousSchemaVersionID: elements[1], + return SchemaRootKey{ + SchemaRoot: elements[0], + SchemaVersionID: elements[1], }, nil } @@ -445,8 
+437,8 @@ func (k HeadStoreKey) WithFieldId(fieldId string) HeadStoreKey { func (k DataStoreKey) ToString() string { var result string - if k.CollectionID != "" { - result = result + "/" + k.CollectionID + if k.CollectionRootID != 0 { + result = result + "/" + fmt.Sprint(k.CollectionRootID) } if k.InstanceType != "" { result = result + "/" + string(k.InstanceType) @@ -470,7 +462,7 @@ func (k DataStoreKey) ToDS() ds.Key { } func (k DataStoreKey) Equal(other DataStoreKey) bool { - return k.CollectionID == other.CollectionID && + return k.CollectionRootID == other.CollectionRootID && k.DocID == other.DocID && k.FieldId == other.FieldId && k.InstanceType == other.InstanceType @@ -478,8 +470,8 @@ func (k DataStoreKey) Equal(other DataStoreKey) bool { func (k DataStoreKey) ToPrimaryDataStoreKey() PrimaryDataStoreKey { return PrimaryDataStoreKey{ - CollectionId: k.CollectionID, - DocID: k.DocID, + CollectionRootID: k.CollectionRootID, + DocID: k.DocID, } } @@ -588,8 +580,8 @@ func (k IndexDataStoreKey) Equal(other IndexDataStoreKey) bool { func (k PrimaryDataStoreKey) ToDataStoreKey() DataStoreKey { return DataStoreKey{ - CollectionID: k.CollectionId, - DocID: k.DocID, + CollectionRootID: k.CollectionRootID, + DocID: k.DocID, } } @@ -604,8 +596,8 @@ func (k PrimaryDataStoreKey) ToDS() ds.Key { func (k PrimaryDataStoreKey) ToString() string { result := "" - if k.CollectionId != "" { - result = result + "/" + k.CollectionId + if k.CollectionRootID != 0 { + result = result + "/" + fmt.Sprint(k.CollectionRootID) } result = result + PRIMARY_KEY if k.DocID != "" { @@ -679,43 +671,25 @@ func (k SchemaVersionKey) ToDS() ds.Key { return ds.NewKey(k.ToString()) } -func (k SchemaHistoryKey) ToString() string { +func (k SchemaRootKey) ToString() string { result := SCHEMA_VERSION_HISTORY if k.SchemaRoot != "" { result = result + "/" + k.SchemaRoot } - if k.PreviousSchemaVersionID != "" { - result = result + "/" + k.PreviousSchemaVersionID - } - - return result -} - -func (k 
SchemaHistoryKey) Bytes() []byte { - return []byte(k.ToString()) -} - -func (k SchemaHistoryKey) ToDS() ds.Key { - return ds.NewKey(k.ToString()) -} - -func (k SchemaVersionMigrationKey) ToString() string { - result := SCHEMA_MIGRATION - - if k.SourceSchemaVersionID != "" { - result = result + "/" + k.SourceSchemaVersionID + if k.SchemaVersionID != "" { + result = result + "/" + k.SchemaVersionID } return result } -func (k SchemaVersionMigrationKey) Bytes() []byte { +func (k SchemaRootKey) Bytes() []byte { return []byte(k.ToString()) } -func (k SchemaVersionMigrationKey) ToDS() ds.Key { +func (k SchemaRootKey) ToDS() ds.Key { return ds.NewKey(k.ToString()) } @@ -832,10 +806,11 @@ func (k DataStoreKey) PrefixEnd() DataStoreKey { newKey.InstanceType = InstanceType(bytesPrefixEnd([]byte(k.InstanceType))) return newKey } - if k.CollectionID != "" { - newKey.CollectionID = string(bytesPrefixEnd([]byte(k.CollectionID))) + if k.CollectionRootID != 0 { + newKey.CollectionRootID = k.CollectionRootID + 1 return newKey } + return newKey } diff --git a/core/key_test.go b/core/key_test.go index 52a22a5856..50bf1198c4 100644 --- a/core/key_test.go +++ b/core/key_test.go @@ -11,6 +11,7 @@ package core import ( + "fmt" "testing" ds "github.com/ipfs/go-datastore" @@ -34,10 +35,10 @@ func TestNewDataStoreKey_ReturnsCollectionIdAndIndexIdAndDocIDAndFieldIdAndInsta t *testing.T, ) { instanceType := "anyType" - fieldId := "f1" + fieldID := "f1" docID := "docID" - collectionId := "1" - inputString := collectionId + "/" + instanceType + "/" + docID + "/" + fieldId + var collectionRootID uint32 = 2 + inputString := fmt.Sprintf("%v/%s/%s/%s", collectionRootID, instanceType, docID, fieldID) result, err := NewDataStoreKey(inputString) if err != nil { @@ -48,12 +49,12 @@ func TestNewDataStoreKey_ReturnsCollectionIdAndIndexIdAndDocIDAndFieldIdAndInsta assert.Equal( t, DataStoreKey{ - CollectionID: collectionId, - DocID: docID, - FieldId: fieldId, - InstanceType: InstanceType(instanceType)}, + 
CollectionRootID: collectionRootID, + DocID: docID, + FieldId: fieldID, + InstanceType: InstanceType(instanceType)}, result) - assert.Equal(t, "/"+collectionId+"/"+instanceType+"/"+docID+"/"+fieldId, resultString) + assert.Equal(t, fmt.Sprintf("/%v/%s/%s/%s", collectionRootID, instanceType, docID, fieldID), resultString) } func TestNewDataStoreKey_ReturnsEmptyStruct_GivenAStringWithMissingElements(t *testing.T) { @@ -67,8 +68,8 @@ func TestNewDataStoreKey_ReturnsEmptyStruct_GivenAStringWithMissingElements(t *t func TestNewDataStoreKey_GivenAShortObjectMarker(t *testing.T) { instanceType := "anyType" docID := "docID" - collectionId := "1" - inputString := collectionId + "/" + instanceType + "/" + docID + var collectionRootID uint32 = 2 + inputString := fmt.Sprintf("%v/%s/%s", collectionRootID, instanceType, docID) result, err := NewDataStoreKey(inputString) if err != nil { @@ -79,11 +80,11 @@ func TestNewDataStoreKey_GivenAShortObjectMarker(t *testing.T) { assert.Equal( t, DataStoreKey{ - CollectionID: collectionId, - DocID: docID, - InstanceType: InstanceType(instanceType)}, + CollectionRootID: collectionRootID, + DocID: docID, + InstanceType: InstanceType(instanceType)}, result) - assert.Equal(t, "/"+collectionId+"/"+instanceType+"/"+docID, resultString) + assert.Equal(t, fmt.Sprintf("/%v/%s/%s", collectionRootID, instanceType, docID), resultString) } func TestNewDataStoreKey_GivenAStringWithExtraPrefixes(t *testing.T) { diff --git a/db/backup.go b/db/backup.go index 5573d77894..431e0eb2ee 100644 --- a/db/backup.go +++ b/db/backup.go @@ -122,7 +122,7 @@ func (db *db) basicExport(ctx context.Context, txn datastore.Txn, config *client cols := []client.Collection{} if len(config.Collections) == 0 { - cols, err = db.getAllCollections(ctx, txn) + cols, err = db.getAllCollections(ctx, txn, false) if err != nil { return NewErrFailedToGetAllCollections(err) } diff --git a/db/base/collection_keys.go b/db/base/collection_keys.go index b2adc2f9e7..e63397d72c 100644 --- 
a/db/base/collection_keys.go +++ b/db/base/collection_keys.go @@ -20,7 +20,7 @@ import ( // MakeDataStoreKeyWithCollectionDescription returns the datastore key for the given collection description. func MakeDataStoreKeyWithCollectionDescription(col client.CollectionDescription) core.DataStoreKey { return core.DataStoreKey{ - CollectionID: col.IDString(), + CollectionRootID: col.RootID, } } @@ -30,8 +30,8 @@ func MakeDataStoreKeyWithCollectionAndDocID( docID string, ) core.DataStoreKey { return core.DataStoreKey{ - CollectionID: col.IDString(), - DocID: docID, + CollectionRootID: col.RootID, + DocID: docID, } } diff --git a/db/collection.go b/db/collection.go index 639a53a341..796cf24a76 100644 --- a/db/collection.go +++ b/db/collection.go @@ -21,6 +21,7 @@ import ( ds "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/query" ipld "github.com/ipfs/go-ipld-format" + "github.com/lens-vm/lens/host-go/config/model" "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" @@ -115,6 +116,7 @@ func (db *db) createCollection( return nil, err } desc.ID = uint32(colID) + desc.RootID = desc.ID schema, err = description.CreateSchemaVersion(ctx, txn, schema) if err != nil { @@ -151,7 +153,8 @@ func (db *db) updateSchema( existingSchemaByName map[string]client.SchemaDescription, proposedDescriptionsByName map[string]client.SchemaDescription, schema client.SchemaDescription, - setAsDefaultVersion bool, + migration immutable.Option[model.Lens], + setAsActiveVersion bool, ) error { hasChanged, err := db.validateUpdateSchema( ctx, @@ -195,31 +198,92 @@ func (db *db) updateSchema( return err } - if setAsDefaultVersion { - cols, err := description.GetCollectionsBySchemaVersionID(ctx, txn, previousVersionID) + // After creating the new schema version, we need to new create collection versions for + // any collection using the previous version. These will be inactive unless [setAsActiveVersion] + // is true. 
+ + cols, err := description.GetCollectionsBySchemaVersionID(ctx, txn, previousVersionID) + if err != nil { + return err + } + + colSeq, err := db.getSequence(ctx, txn, core.COLLECTION) + if err != nil { + return err + } + + for _, col := range cols { + previousID := col.ID + + existingCols, err := description.GetCollectionsBySchemaVersionID(ctx, txn, schema.VersionID) if err != nil { return err } - for _, col := range cols { - if !col.Name.HasValue() { - // Nameless collections cannot be made default as they cannot be queried without a name. - // Note: The `setAsDefaultVersion` block will need a re-write when collections become immutable - // and the schema version stuff gets tracked by [CollectionDescription.Sources] instead. - continue + // The collection version may exist before the schema version was created locally. This is + // because migrations for the globally known schema version may have been registered locally + // (typically to handle documents synced over P2P at higher versions) before the local schema + // was updated. We need to check for them now, and update them instead of creating new ones + // if they exist. + var isExistingCol bool + existingColLoop: + for _, existingCol := range existingCols { + sources := existingCol.CollectionSources() + for _, source := range sources { + // Make sure that this collection is the parent of the current [col], and not part of + // another collection set that happens to be using the same schema. 
+ if source.SourceCollectionID == previousID { + if existingCol.RootID == client.OrphanRootID { + existingCol.RootID = col.RootID + existingCol, err = description.SaveCollection(ctx, txn, existingCol) + if err != nil { + return err + } + } + isExistingCol = true + break existingColLoop + } } + } - col.SchemaVersionID = schema.VersionID - - col, err = description.SaveCollection(ctx, txn, col) + if !isExistingCol { + colID, err := colSeq.next(ctx, txn) if err != nil { return err } - err = db.setDefaultSchemaVersionExplicit(ctx, txn, col.Name.Value(), schema.VersionID) + // Create any new collections without a name (inactive), if [setAsActiveVersion] is true + // they will be activated later along with any existing collection versions. + col.Name = immutable.None[string]() + col.ID = uint32(colID) + col.SchemaVersionID = schema.VersionID + col.Sources = []any{ + &client.CollectionSource{ + SourceCollectionID: previousID, + Transform: migration, + }, + } + + _, err = description.SaveCollection(ctx, txn, col) if err != nil { return err } + + if migration.HasValue() { + err = db.LensRegistry().SetMigration(ctx, col.ID, migration.Value()) + if err != nil { + return err + } + } + } + } + + if setAsActiveVersion { + // activate collection versions using the new schema ID. This call must be made after + // all new collection versions have been saved. + err = db.setActiveSchemaVersion(ctx, txn, schema.VersionID) + if err != nil { + return err } } @@ -397,7 +461,14 @@ func validateUpdateSchemaFields( return hasChanged, nil } -func (db *db) setDefaultSchemaVersion( +// SetActiveSchemaVersion activates all collection versions with the given schema version, and deactivates all +// those without it (if they share the same schema root). +// +// This will affect all operations interacting with the schema where a schema version is not explicitly +// provided. This includes GQL queries and Collection operations. 
+// +// It will return an error if the provided schema version ID does not exist. +func (db *db) setActiveSchemaVersion( ctx context.Context, txn datastore.Txn, schemaVersionID string, @@ -406,46 +477,136 @@ func (db *db) setDefaultSchemaVersion( return ErrSchemaVersionIDEmpty } + cols, err := description.GetCollectionsBySchemaVersionID(ctx, txn, schemaVersionID) + if err != nil { + return err + } + schema, err := description.GetSchemaVersion(ctx, txn, schemaVersionID) if err != nil { return err } - colDescs, err := description.GetCollectionsBySchemaRoot(ctx, txn, schema.Root) + colsWithRoot, err := description.GetCollectionsBySchemaRoot(ctx, txn, schema.Root) if err != nil { return err } - for _, col := range colDescs { - col.SchemaVersionID = schemaVersionID - col, err = description.SaveCollection(ctx, txn, col) + colsBySourceID := map[uint32][]client.CollectionDescription{} + colsByID := make(map[uint32]client.CollectionDescription, len(colsWithRoot)) + for _, col := range colsWithRoot { + colsByID[col.ID] = col + + sources := col.CollectionSources() + if len(sources) > 0 { + // For now, we assume that each collection can only have a single source. This will likely need + // to change later. + slice := colsBySourceID[sources[0].SourceCollectionID] + slice = append(slice, col) + colsBySourceID[sources[0].SourceCollectionID] = slice + } + } + + for _, col := range cols { + if col.Name.HasValue() { + // The collection is already active, so we can skip it and continue + continue + } + sources := col.CollectionSources() + + var activeCol client.CollectionDescription + var rootCol client.CollectionDescription + var isActiveFound bool + if len(sources) > 0 { + // For now, we assume that each collection can only have a single source. This will likely need + // to change later. 
+ activeCol, rootCol, isActiveFound = db.getActiveCollectionDown(ctx, txn, colsByID, sources[0].SourceCollectionID) + } + if !isActiveFound { + // We need to look both down and up for the active version - the most recent is not necessarily the active one. + activeCol, isActiveFound = db.getActiveCollectionUp(ctx, txn, colsBySourceID, rootCol.ID) + } + + var newName string + if isActiveFound { + newName = activeCol.Name.Value() + } else { + // If there are no active versions in the collection set, take the name of the schema to be the name of the + // collection. + newName = schema.Name + } + col.Name = immutable.Some(newName) + + _, err = description.SaveCollection(ctx, txn, col) + if err != nil { + return err + } + + if isActiveFound { + // Deactivate the currently active collection by setting its name to none. + activeCol.Name = immutable.None[string]() + _, err = description.SaveCollection(ctx, txn, activeCol) + if err != nil { + return err + } + } } + // Load the schema into the clients (e.g. 
GQL) return db.loadSchema(ctx, txn) } -func (db *db) setDefaultSchemaVersionExplicit( +func (db *db) getActiveCollectionDown( ctx context.Context, txn datastore.Txn, - collectionName string, - schemaVersionID string, -) error { - if schemaVersionID == "" { - return ErrSchemaVersionIDEmpty + colsByID map[uint32]client.CollectionDescription, + id uint32, +) (client.CollectionDescription, client.CollectionDescription, bool) { + col, ok := colsByID[id] + if !ok { + return client.CollectionDescription{}, client.CollectionDescription{}, false } - col, err := description.GetCollectionByName(ctx, txn, collectionName) - if err != nil { - return err + if col.Name.HasValue() { + return col, client.CollectionDescription{}, true } - col.SchemaVersionID = schemaVersionID + sources := col.CollectionSources() + if len(sources) == 0 { + // If a collection has zero sources it is likely the initial collection version, or + // this collection set is currently orphaned (can happen when setting migrations that + // do not yet link all the way back to a non-orphaned set) + return client.CollectionDescription{}, col, false + } - _, err = description.SaveCollection(ctx, txn, col) - return err + // For now, we assume that each collection can only have a single source. This will likely need + // to change later. 
+ return db.getActiveCollectionDown(ctx, txn, colsByID, sources[0].SourceCollectionID) +} + +func (db *db) getActiveCollectionUp( + ctx context.Context, + txn datastore.Txn, + colsBySourceID map[uint32][]client.CollectionDescription, + id uint32, +) (client.CollectionDescription, bool) { + cols, ok := colsBySourceID[id] + if !ok { + // We have reached the top of the set, and have not found an active collection + return client.CollectionDescription{}, false + } + + for _, col := range cols { + if col.Name.HasValue() { + return col, true + } + activeCol, isFound := db.getActiveCollectionUp(ctx, txn, colsBySourceID, col.ID) + if isFound { + return activeCol, isFound + } + } + + return client.CollectionDescription{}, false } // getCollectionsByVersionId returns the [*collection]s at the given [schemaVersionId] version. @@ -543,7 +704,11 @@ func (db *db) getCollectionsBySchemaRoot( for i, col := range cols { schema, err := description.GetSchemaVersion(ctx, txn, col.SchemaVersionID) if err != nil { - return nil, err + // If the schema is not found we leave it as empty and carry on. This can happen when + // a migration is registered before the schema is declared locally. + if !errors.Is(err, ds.ErrNotFound) { + return nil, err + } } collection := db.newCollection(col, schema) @@ -558,18 +723,37 @@ func (db *db) getCollectionsBySchemaRoot( return collections, nil } -// getAllCollections gets all the currently defined collections. -func (db *db) getAllCollections(ctx context.Context, txn datastore.Txn) ([]client.Collection, error) { - cols, err := description.GetCollections(ctx, txn) - if err != nil { - return nil, err +// getAllCollections returns all collections and their descriptions that currently exist within +// this [Store]. +// +// If `true` is provided, the results will include inactive collections. If `false`, only active collections +// will be returned. 
+func (db *db) getAllCollections(ctx context.Context, txn datastore.Txn, getInactive bool) ([]client.Collection, error) { + var cols []client.CollectionDescription + + if getInactive { + var err error + cols, err = description.GetCollections(ctx, txn) + if err != nil { + return nil, err + } + } else { + var err error + cols, err = description.GetActiveCollections(ctx, txn) + if err != nil { + return nil, err + } } collections := make([]client.Collection, len(cols)) for i, col := range cols { schema, err := description.GetSchemaVersion(ctx, txn, col.SchemaVersionID) if err != nil { - return nil, err + // If the schema is not found we leave it as empty and carry on. This can happen when + // a migration is registered before the schema is declared locally. + if !errors.Is(err, ds.ErrNotFound) { + return nil, err + } } collection := db.newCollection(col, schema) @@ -586,7 +770,7 @@ func (db *db) getAllCollections(ctx context.Context, txn datastore.Txn) ([]clien // getAllActiveDefinitions returns all queryable collection/views and any embedded schema used by them. 
func (db *db) getAllActiveDefinitions(ctx context.Context, txn datastore.Txn) ([]client.CollectionDefinition, error) { - cols, err := description.GetCollections(ctx, txn) + cols, err := description.GetActiveCollections(ctx, txn) if err != nil { return nil, err } @@ -643,7 +827,7 @@ func (c *collection) getAllDocIDsChan( txn datastore.Txn, ) (<-chan client.DocIDResult, error) { prefix := core.PrimaryDataStoreKey{ // empty path for all keys prefix - CollectionId: fmt.Sprint(c.ID()), + CollectionRootID: c.Description().RootID, } q, err := txn.Datastore().Query(ctx, query.Query{ Prefix: prefix.ToString(), @@ -1236,16 +1420,16 @@ func (c *collection) commitImplicitTxn(ctx context.Context, txn datastore.Txn) e func (c *collection) getPrimaryKeyFromDocID(docID client.DocID) core.PrimaryDataStoreKey { return core.PrimaryDataStoreKey{ - CollectionId: fmt.Sprint(c.ID()), - DocID: docID.String(), + CollectionRootID: c.Description().RootID, + DocID: docID.String(), } } func (c *collection) getDataStoreKeyFromDocID(docID client.DocID) core.DataStoreKey { return core.DataStoreKey{ - CollectionID: fmt.Sprint(c.ID()), - DocID: docID.String(), - InstanceType: core.ValueKey, + CollectionRootID: c.Description().RootID, + DocID: docID.String(), + InstanceType: core.ValueKey, } } @@ -1256,9 +1440,9 @@ func (c *collection) tryGetFieldKey(primaryKey core.PrimaryDataStoreKey, fieldNa } return core.DataStoreKey{ - CollectionID: primaryKey.CollectionId, - DocID: primaryKey.DocID, - FieldId: strconv.FormatUint(uint64(fieldId), 10), + CollectionRootID: c.Description().RootID, + DocID: primaryKey.DocID, + FieldId: strconv.FormatUint(uint64(fieldId), 10), }, true } diff --git a/db/collection_delete.go b/db/collection_delete.go index f91b8e38f2..6c360d09c0 100644 --- a/db/collection_delete.go +++ b/db/collection_delete.go @@ -12,7 +12,6 @@ package db import ( "context" - "fmt" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" @@ -207,8 +206,8 @@ func (c 
*collection) deleteWithFilter( docID := doc.GetID() primaryKey := core.PrimaryDataStoreKey{ - CollectionId: fmt.Sprint(c.ID()), - DocID: docID, + CollectionRootID: c.Description().RootID, + DocID: docID, } // Delete the document that is associated with this DS key we got from the filter. diff --git a/db/description/collection.go b/db/description/collection.go index 3daeaf31de..8ffd473053 100644 --- a/db/description/collection.go +++ b/db/description/collection.go @@ -13,6 +13,7 @@ package description import ( "context" "encoding/json" + "sort" "github.com/ipfs/go-datastore/query" @@ -193,6 +194,8 @@ func GetCollectionsBySchemaRoot( } // GetCollections returns all collections in the system. +// +// This includes inactive collections. func GetCollections( ctx context.Context, txn datastore.Txn, @@ -228,6 +231,47 @@ func GetCollections( return cols, nil } +// GetActiveCollections returns all active collections in the system. +func GetActiveCollections( + ctx context.Context, + txn datastore.Txn, +) ([]client.CollectionDescription, error) { + q, err := txn.Systemstore().Query(ctx, query.Query{ + Prefix: core.NewCollectionNameKey("").ToString(), + }) + if err != nil { + return nil, NewErrFailedToCreateCollectionQuery(err) + } + + cols := make([]client.CollectionDescription, 0) + for res := range q.Next() { + if res.Error != nil { + if err := q.Close(); err != nil { + return nil, NewErrFailedToCloseCollectionQuery(err) + } + return nil, err + } + + var id uint32 + err = json.Unmarshal(res.Value, &id) + if err != nil { + return nil, err + } + + col, err := GetCollectionByID(ctx, txn, id) + if err != nil { + return nil, err + } + + cols = append(cols, col) + } + + // Sort the results by ID, so that the order matches that of [GetCollections]. + sort.Slice(cols, func(i, j int) bool { return cols[i].ID < cols[j].ID }) + + return cols, nil +} + // HasCollectionByName returns true if there is a collection of the given name, // else returns false. 
func HasCollectionByName( diff --git a/db/description/schema.go b/db/description/schema.go index c486ee1a59..08e6920302 100644 --- a/db/description/schema.go +++ b/db/description/schema.go @@ -47,7 +47,6 @@ func CreateSchemaVersion( return client.SchemaDescription{}, err } versionID := scid.String() - previousSchemaVersionID := desc.VersionID isNew := desc.Root == "" desc.VersionID = versionID @@ -69,9 +68,9 @@ func CreateSchemaVersion( } if !isNew { - // We don't need to add a history key if this is the first version - schemaVersionHistoryKey := core.NewSchemaHistoryKey(desc.Root, previousSchemaVersionID) - err = txn.Systemstore().Put(ctx, schemaVersionHistoryKey.ToDS(), []byte(desc.VersionID)) + // We don't need to add a root key if this is the first version + schemaVersionHistoryKey := core.NewSchemaRootKey(desc.Root, desc.VersionID) + err = txn.Systemstore().Put(ctx, schemaVersionHistoryKey.ToDS(), []byte{}) if err != nil { return client.SchemaDescription{}, err } @@ -152,7 +151,7 @@ func GetSchemas( ctx context.Context, txn datastore.Txn, ) ([]client.SchemaDescription, error) { - cols, err := GetCollections(ctx, txn) + cols, err := GetActiveCollections(ctx, txn) if err != nil { return nil, err } @@ -253,7 +252,7 @@ func GetSchemaVersionIDs( // It is not present in the history prefix. 
schemaVersions := []string{schemaRoot} - prefix := core.NewSchemaHistoryKey(schemaRoot, "") + prefix := core.NewSchemaRootKey(schemaRoot, "") q, err := txn.Systemstore().Query(ctx, query.Query{ Prefix: prefix.ToString(), KeysOnly: true, @@ -270,15 +269,12 @@ func GetSchemaVersionIDs( return nil, err } - key, err := core.NewSchemaHistoryKeyFromString(res.Key) + key, err := core.NewSchemaRootKeyFromString(res.Key) if err != nil { - if err := q.Close(); err != nil { - return nil, NewErrFailedToCloseSchemaQuery(err) - } return nil, err } - schemaVersions = append(schemaVersions, key.PreviousSchemaVersionID) + schemaVersions = append(schemaVersions, key.SchemaVersionID) } return schemaVersions, nil diff --git a/db/indexed_docs_test.go b/db/indexed_docs_test.go index b8987819ed..34021ea369 100644 --- a/db/indexed_docs_test.go +++ b/db/indexed_docs_test.go @@ -131,7 +131,7 @@ func (b *indexKeyBuilder) Build() core.IndexDataStoreKey { return key } - cols, err := b.f.db.getAllCollections(b.f.ctx, b.f.txn) + cols, err := b.f.db.getAllCollections(b.f.ctx, b.f.txn, false) require.NoError(b.f.t, err) var collection client.Collection for _, col := range cols { @@ -641,7 +641,7 @@ func TestNonUniqueCreate_IfDatastoreFailsToStoreIndex_ReturnError(t *testing.T) f.saveDocToCollection(doc, f.users) fieldKeyString := core.DataStoreKey{ - CollectionID: f.users.Description().IDString(), + CollectionRootID: f.users.Description().RootID, }.WithDocID(doc.ID().String()). WithFieldId("1"). WithValueFlag(). diff --git a/db/lens.go b/db/lens.go new file mode 100644 index 0000000000..009f2de92b --- /dev/null +++ b/db/lens.go @@ -0,0 +1,160 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package db + +import ( + "context" + + ds "github.com/ipfs/go-datastore" + "github.com/sourcenetwork/immutable" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/core" + "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/db/description" + "github.com/sourcenetwork/defradb/errors" +) + +func (db *db) setMigration(ctx context.Context, txn datastore.Txn, cfg client.LensConfig) error { + dstCols, err := description.GetCollectionsBySchemaVersionID(ctx, txn, cfg.DestinationSchemaVersionID) + if err != nil { + return err + } + + sourceCols, err := description.GetCollectionsBySchemaVersionID(ctx, txn, cfg.SourceSchemaVersionID) + if err != nil { + return err + } + + colSeq, err := db.getSequence(ctx, txn, core.COLLECTION) + if err != nil { + return err + } + + if len(sourceCols) == 0 { + // If no collections are found with the given [SourceSchemaVersionID], this migration must be from + // a collection/schema version that does not yet exist locally. We must now create it. + colID, err := colSeq.next(ctx, txn) + if err != nil { + return err + } + + desc := client.CollectionDescription{ + ID: uint32(colID), + RootID: client.OrphanRootID, + SchemaVersionID: cfg.SourceSchemaVersionID, + } + + col, err := description.SaveCollection(ctx, txn, desc) + if err != nil { + return err + } + + sourceCols = append(sourceCols, col) + } + + for _, sourceCol := range sourceCols { + isDstCollectionFound := false + dstColsLoop: + for i, dstCol := range dstCols { + if len(dstCol.Sources) == 0 { + // If the destination collection has no sources at all, it must have been added as an orphaned source + by another migration. 
This can happen if the migrations are added in an unusual order, before + // their schemas have been defined locally. + dstCol.Sources = append(dstCol.Sources, &client.CollectionSource{ + SourceCollectionID: sourceCol.ID, + }) + dstCols[i] = dstCol + } + + for _, source := range dstCol.CollectionSources() { + if source.SourceCollectionID == sourceCol.ID { + isDstCollectionFound = true + break dstColsLoop + } + } + } + + if !isDstCollectionFound { + // If the destination collection was not found, we must create it. This can happen when setting a migration + // to a schema version that does not yet exist locally. + colID, err := colSeq.next(ctx, txn) + if err != nil { + return err + } + + desc := client.CollectionDescription{ + ID: uint32(colID), + RootID: sourceCol.RootID, + SchemaVersionID: cfg.DestinationSchemaVersionID, + Sources: []any{ + &client.CollectionSource{ + SourceCollectionID: sourceCol.ID, + // The transform will be set later, when updating all destination collections + // whether they are newly created or not. 
+ }, + }, + } + + col, err := description.SaveCollection(ctx, txn, desc) + if err != nil { + return err + } + + if desc.RootID != client.OrphanRootID { + var schemaFound bool + // If the root schema id is known, we need to add it to the index, even if the schema is not known locally + schema, err := description.GetSchemaVersion(ctx, txn, cfg.SourceSchemaVersionID) + if err != nil { + if !errors.Is(err, ds.ErrNotFound) { + return err + } + } else { + schemaFound = true + } + + if schemaFound { + schemaRootKey := core.NewSchemaRootKey(schema.Root, cfg.DestinationSchemaVersionID) + err = txn.Systemstore().Put(ctx, schemaRootKey.ToDS(), []byte{}) + if err != nil { + return err + } + } + } + + dstCols = append(dstCols, col) + } + } + + for _, col := range dstCols { + collectionSources := col.CollectionSources() + + for _, source := range collectionSources { + // WARNING: Here we assume that the collection source points at a collection of the source schema version. + // This works currently, as collections only have a single source. If/when this changes we need to make + // sure we only update the correct source. 
+ + source.Transform = immutable.Some(cfg.Lens) + + err = db.LensRegistry().SetMigration(ctx, col.ID, cfg.Lens) + if err != nil { + return err + } + } + + _, err = description.SaveCollection(ctx, txn, col) + if err != nil { + return err + } + } + + return nil +} diff --git a/db/schema.go b/db/schema.go index 7b8a7f1765..7d984542f6 100644 --- a/db/schema.go +++ b/db/schema.go @@ -18,6 +18,7 @@ import ( "unicode" jsonpatch "github.com/evanphx/json-patch/v5" + "github.com/lens-vm/lens/host-go/config/model" "github.com/sourcenetwork/immutable" @@ -85,6 +86,7 @@ func (db *db) patchSchema( ctx context.Context, txn datastore.Txn, patchString string, + migration immutable.Option[model.Lens], setAsDefaultVersion bool, ) error { patch, err := jsonpatch.DecodePatch([]byte(patchString)) @@ -133,6 +135,7 @@ func (db *db) patchSchema( existingSchemaByName, newSchemaByName, schema, + migration, setAsDefaultVersion, ) if err != nil { diff --git a/db/txn_db.go b/db/txn_db.go index a54dda99c7..a05f2d895d 100644 --- a/db/txn_db.go +++ b/db/txn_db.go @@ -13,6 +13,9 @@ package db import ( "context" + "github.com/lens-vm/lens/host-go/config/model" + "github.com/sourcenetwork/immutable" + "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" ) @@ -160,19 +163,19 @@ func (db *explicitTxnDB) GetCollectionsByVersionID( } // GetAllCollections gets all the currently defined collections. -func (db *implicitTxnDB) GetAllCollections(ctx context.Context) ([]client.Collection, error) { +func (db *implicitTxnDB) GetAllCollections(ctx context.Context, getInactive bool) ([]client.Collection, error) { txn, err := db.NewTxn(ctx, true) if err != nil { return nil, err } defer txn.Discard(ctx) - return db.getAllCollections(ctx, txn) + return db.getAllCollections(ctx, txn, getInactive) } // GetAllCollections gets all the currently defined collections. 
-func (db *explicitTxnDB) GetAllCollections(ctx context.Context) ([]client.Collection, error) { - return db.getAllCollections(ctx, db.txn) +func (db *explicitTxnDB) GetAllCollections(ctx context.Context, getInactive bool) ([]client.Collection, error) { + return db.getAllCollections(ctx, db.txn, getInactive) } // GetSchemasByName returns the all schema versions with the given name. @@ -313,6 +316,7 @@ func (db *explicitTxnDB) AddSchema(ctx context.Context, schemaString string) ([] func (db *implicitTxnDB) PatchSchema( ctx context.Context, patchString string, + migration immutable.Option[model.Lens], setAsDefaultVersion bool, ) error { txn, err := db.NewTxn(ctx, false) @@ -321,7 +325,7 @@ func (db *implicitTxnDB) PatchSchema( } defer txn.Discard(ctx) - err = db.patchSchema(ctx, txn, patchString, setAsDefaultVersion) + err = db.patchSchema(ctx, txn, patchString, migration, setAsDefaultVersion) if err != nil { return err } @@ -343,19 +347,20 @@ func (db *implicitTxnDB) PatchSchema( func (db *explicitTxnDB) PatchSchema( ctx context.Context, patchString string, + migration immutable.Option[model.Lens], setAsDefaultVersion bool, ) error { - return db.patchSchema(ctx, db.txn, patchString, setAsDefaultVersion) + return db.patchSchema(ctx, db.txn, patchString, migration, setAsDefaultVersion) } -func (db *implicitTxnDB) SetDefaultSchemaVersion(ctx context.Context, schemaVersionID string) error { +func (db *implicitTxnDB) SetActiveSchemaVersion(ctx context.Context, schemaVersionID string) error { txn, err := db.NewTxn(ctx, false) if err != nil { return err } defer txn.Discard(ctx) - err = db.setDefaultSchemaVersion(ctx, txn, schemaVersionID) + err = db.setActiveSchemaVersion(ctx, txn, schemaVersionID) if err != nil { return err } @@ -363,8 +368,8 @@ func (db *implicitTxnDB) SetDefaultSchemaVersion(ctx context.Context, schemaVers return txn.Commit(ctx) } -func (db *explicitTxnDB) SetDefaultSchemaVersion(ctx context.Context, schemaVersionID string) error { - return 
db.setDefaultSchemaVersion(ctx, db.txn, schemaVersionID) +func (db *explicitTxnDB) SetActiveSchemaVersion(ctx context.Context, schemaVersionID string) error { + return db.setActiveSchemaVersion(ctx, db.txn, schemaVersionID) } func (db *implicitTxnDB) SetMigration(ctx context.Context, cfg client.LensConfig) error { @@ -374,7 +379,7 @@ func (db *implicitTxnDB) SetMigration(ctx context.Context, cfg client.LensConfig } defer txn.Discard(ctx) - err = db.lensRegistry.SetMigration(ctx, cfg) + err = db.setMigration(ctx, txn, cfg) if err != nil { return err } @@ -383,7 +388,7 @@ func (db *implicitTxnDB) SetMigration(ctx context.Context, cfg client.LensConfig } func (db *explicitTxnDB) SetMigration(ctx context.Context, cfg client.LensConfig) error { - return db.lensRegistry.SetMigration(ctx, cfg) + return db.setMigration(ctx, db.txn, cfg) } func (db *implicitTxnDB) AddView(ctx context.Context, query string, sdl string) ([]client.CollectionDefinition, error) { diff --git a/docs/data_format_changes/i2198-collection-remodel.md b/docs/data_format_changes/i2198-collection-remodel.md new file mode 100644 index 0000000000..8fb6898393 --- /dev/null +++ b/docs/data_format_changes/i2198-collection-remodel.md @@ -0,0 +1,5 @@ +# Remodel Collection SchemaVersions and migrations on Collections + +Models Collection SchemaVersions and migrations on Collections, instead of in the Lens Registry. + +Some test schema version IDs were also corrected. 
diff --git a/http/client.go b/http/client.go index 5db4b0de20..1c4012d76b 100644 --- a/http/client.go +++ b/http/client.go @@ -17,9 +17,12 @@ import ( "io" "net/http" "net/url" + "strconv" "strings" blockstore "github.com/ipfs/boxo/blockstore" + "github.com/lens-vm/lens/host-go/config/model" + "github.com/sourcenetwork/immutable" sse "github.com/vito/go-sse/sse" "github.com/sourcenetwork/defradb/client" @@ -134,16 +137,18 @@ func (c *Client) AddSchema(ctx context.Context, schema string) ([]client.Collect type patchSchemaRequest struct { Patch string SetAsDefaultVersion bool + Migration immutable.Option[model.Lens] } func (c *Client) PatchSchema( ctx context.Context, patch string, + migration immutable.Option[model.Lens], setAsDefaultVersion bool, ) error { methodURL := c.http.baseURL.JoinPath("schema") - body, err := json.Marshal(patchSchemaRequest{patch, setAsDefaultVersion}) + body, err := json.Marshal(patchSchemaRequest{patch, setAsDefaultVersion, migration}) if err != nil { return err } @@ -156,7 +161,7 @@ func (c *Client) PatchSchema( return err } -func (c *Client) SetDefaultSchemaVersion(ctx context.Context, schemaVersionID string) error { +func (c *Client) SetActiveSchemaVersion(ctx context.Context, schemaVersionID string) error { methodURL := c.http.baseURL.JoinPath("schema", "default") req, err := http.NewRequestWithContext(ctx, http.MethodPost, methodURL.String(), strings.NewReader(schemaVersionID)) @@ -194,7 +199,20 @@ func (c *Client) AddView(ctx context.Context, query string, sdl string) ([]clien } func (c *Client) SetMigration(ctx context.Context, config client.LensConfig) error { - return c.LensRegistry().SetMigration(ctx, config) + methodURL := c.http.baseURL.JoinPath("lens") + + body, err := json.Marshal(config) + if err != nil { + return err + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, methodURL.String(), bytes.NewBuffer(body)) + if err != nil { + return err + } + + _, err = c.http.request(req) + return err } func (c 
*Client) LensRegistry() client.LensRegistry { @@ -254,8 +272,9 @@ func (c *Client) GetCollectionsByVersionID(ctx context.Context, versionId string return collections, nil } -func (c *Client) GetAllCollections(ctx context.Context) ([]client.Collection, error) { +func (c *Client) GetAllCollections(ctx context.Context, getInactive bool) ([]client.Collection, error) { methodURL := c.http.baseURL.JoinPath("collections") + methodURL.RawQuery = url.Values{"get_inactive": []string{strconv.FormatBool(getInactive)}}.Encode() req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil) if err != nil { diff --git a/http/client_lens.go b/http/client_lens.go index 3c8c2fc903..9021aa31d6 100644 --- a/http/client_lens.go +++ b/http/client_lens.go @@ -14,8 +14,10 @@ import ( "bytes" "context" "encoding/json" + "fmt" "net/http" + "github.com/lens-vm/lens/host-go/config/model" "github.com/sourcenetwork/immutable/enumerable" "github.com/sourcenetwork/defradb/client" @@ -34,10 +36,18 @@ func (c *LensRegistry) WithTxn(tx datastore.Txn) client.LensRegistry { return &LensRegistry{http} } -func (c *LensRegistry) SetMigration(ctx context.Context, config client.LensConfig) error { - methodURL := c.http.baseURL.JoinPath("lens") +type setMigrationRequest struct { + CollectionID uint32 + Config model.Lens +} + +func (c *LensRegistry) SetMigration(ctx context.Context, collectionID uint32, config model.Lens) error { + methodURL := c.http.baseURL.JoinPath("lens", "registry") - body, err := json.Marshal(config) + body, err := json.Marshal(setMigrationRequest{ + CollectionID: collectionID, + Config: config, + }) if err != nil { return err } @@ -50,7 +60,7 @@ func (c *LensRegistry) SetMigration(ctx context.Context, config client.LensConfi } func (c *LensRegistry) ReloadLenses(ctx context.Context) error { - methodURL := c.http.baseURL.JoinPath("lens", "reload") + methodURL := c.http.baseURL.JoinPath("lens", "registry", "reload") req, err := http.NewRequestWithContext(ctx, 
http.MethodPost, methodURL.String(), nil) if err != nil { @@ -60,12 +70,17 @@ func (c *LensRegistry) ReloadLenses(ctx context.Context) error { return err } +type migrateRequest struct { + CollectionID uint32 + Data []map[string]any +} + func (c *LensRegistry) MigrateUp( ctx context.Context, src enumerable.Enumerable[map[string]any], - schemaVersionID string, + collectionID uint32, ) (enumerable.Enumerable[map[string]any], error) { - methodURL := c.http.baseURL.JoinPath("lens", schemaVersionID, "up") + methodURL := c.http.baseURL.JoinPath("lens", "registry", fmt.Sprint(collectionID), "up") var data []map[string]any err := enumerable.ForEach(src, func(item map[string]any) { @@ -74,7 +89,13 @@ func (c *LensRegistry) MigrateUp( if err != nil { return nil, err } - body, err := json.Marshal(data) + + request := migrateRequest{ + CollectionID: collectionID, + Data: data, + } + + body, err := json.Marshal(request) if err != nil { return nil, err } @@ -92,9 +113,9 @@ func (c *LensRegistry) MigrateUp( func (c *LensRegistry) MigrateDown( ctx context.Context, src enumerable.Enumerable[map[string]any], - schemaVersionID string, + collectionID uint32, ) (enumerable.Enumerable[map[string]any], error) { - methodURL := c.http.baseURL.JoinPath("lens", schemaVersionID, "down") + methodURL := c.http.baseURL.JoinPath("lens", "registry", fmt.Sprint(collectionID), "down") var data []map[string]any err := enumerable.ForEach(src, func(item map[string]any) { @@ -103,7 +124,13 @@ func (c *LensRegistry) MigrateDown( if err != nil { return nil, err } - body, err := json.Marshal(data) + + request := migrateRequest{ + CollectionID: collectionID, + Data: data, + } + + body, err := json.Marshal(request) if err != nil { return nil, err } @@ -117,31 +144,3 @@ func (c *LensRegistry) MigrateDown( } return enumerable.New(result), nil } - -func (c *LensRegistry) Config(ctx context.Context) ([]client.LensConfig, error) { - methodURL := c.http.baseURL.JoinPath("lens") - - req, err := 
http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil) - if err != nil { - return nil, err - } - var cfgs []client.LensConfig - if err := c.http.requestJson(req, &cfgs); err != nil { - return nil, err - } - return cfgs, nil -} - -func (c *LensRegistry) HasMigration(ctx context.Context, schemaVersionID string) (bool, error) { - methodURL := c.http.baseURL.JoinPath("lens", schemaVersionID) - - req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil) - if err != nil { - return false, err - } - _, err = c.http.request(req) - if err != nil { - return false, err - } - return true, nil -} diff --git a/http/handler_lens.go b/http/handler_lens.go index 5d0838b76a..cb69a691fe 100644 --- a/http/handler_lens.go +++ b/http/handler_lens.go @@ -14,7 +14,6 @@ import ( "net/http" "github.com/getkin/kin-openapi/openapi3" - "github.com/go-chi/chi/v5" "github.com/sourcenetwork/immutable/enumerable" "github.com/sourcenetwork/defradb/client" @@ -36,12 +35,13 @@ func (s *lensHandler) ReloadLenses(rw http.ResponseWriter, req *http.Request) { func (s *lensHandler) SetMigration(rw http.ResponseWriter, req *http.Request) { lens := req.Context().Value(lensContextKey).(client.LensRegistry) - var cfg client.LensConfig - if err := requestJSON(req, &cfg); err != nil { + var request setMigrationRequest + if err := requestJSON(req, &request); err != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return } - err := lens.SetMigration(req.Context(), cfg) + + err := lens.SetMigration(req.Context(), request.CollectionID, request.Config) if err != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return @@ -52,12 +52,13 @@ func (s *lensHandler) SetMigration(rw http.ResponseWriter, req *http.Request) { func (s *lensHandler) MigrateUp(rw http.ResponseWriter, req *http.Request) { lens := req.Context().Value(lensContextKey).(client.LensRegistry) - var src []map[string]any - if err := requestJSON(req, &src); err != nil { + var 
request migrateRequest + if err := requestJSON(req, &request); err != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return } - result, err := lens.MigrateUp(req.Context(), enumerable.New(src), chi.URLParam(req, "version")) + + result, err := lens.MigrateUp(req.Context(), enumerable.New(request.Data), request.CollectionID) if err != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return @@ -76,12 +77,17 @@ func (s *lensHandler) MigrateUp(rw http.ResponseWriter, req *http.Request) { func (s *lensHandler) MigrateDown(rw http.ResponseWriter, req *http.Request) { lens := req.Context().Value(lensContextKey).(client.LensRegistry) - var src []map[string]any - if err := requestJSON(req, &src); err != nil { + var request migrateRequest + if err := requestJSON(req, &request); err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + + result, err := lens.MigrateDown(req.Context(), enumerable.New(request.Data), request.CollectionID) + if err != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return } - result, err := lens.MigrateDown(req.Context(), enumerable.New(src), chi.URLParam(req, "version")) if err != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return @@ -97,32 +103,6 @@ func (s *lensHandler) MigrateDown(rw http.ResponseWriter, req *http.Request) { responseJSON(rw, http.StatusOK, value) } -func (s *lensHandler) Config(rw http.ResponseWriter, req *http.Request) { - lens := req.Context().Value(lensContextKey).(client.LensRegistry) - - cfgs, err := lens.Config(req.Context()) - if err != nil { - responseJSON(rw, http.StatusBadRequest, errorResponse{err}) - return - } - responseJSON(rw, http.StatusOK, cfgs) -} - -func (s *lensHandler) HasMigration(rw http.ResponseWriter, req *http.Request) { - lens := req.Context().Value(lensContextKey).(client.LensRegistry) - - exists, err := lens.HasMigration(req.Context(), chi.URLParam(req, "version")) - if err != nil { - 
responseJSON(rw, http.StatusBadRequest, errorResponse{err}) - return - } - if !exists { - responseJSON(rw, http.StatusNotFound, errorResponse{ErrMigrationNotFound}) - return - } - rw.WriteHeader(http.StatusOK) -} - func (h *lensHandler) bindRoutes(router *Router) { errorResponse := &openapi3.ResponseRef{ Ref: "#/components/responses/error", @@ -130,32 +110,20 @@ func (h *lensHandler) bindRoutes(router *Router) { successResponse := &openapi3.ResponseRef{ Ref: "#/components/responses/success", } - documentSchema := &openapi3.SchemaRef{ - Ref: "#/components/schemas/document", + migrateSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/migrate_request", + } + setMigrationSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/set_migration_request", } - - lensConfigSchema := openapi3.NewSchemaRef("#/components/schemas/lens_config", nil) - lensConfigArraySchema := openapi3.NewArraySchema() - lensConfigArraySchema.Items = lensConfigSchema - - lensConfigResponse := openapi3.NewResponse(). - WithDescription("Lens configurations"). - WithJSONSchema(lensConfigArraySchema) - - lensConfig := openapi3.NewOperation() - lensConfig.OperationID = "lens_config" - lensConfig.Description = "List lens migrations" - lensConfig.Tags = []string{"lens"} - lensConfig.AddResponse(200, lensConfigResponse) - lensConfig.Responses.Set("400", errorResponse) setMigrationRequest := openapi3.NewRequestBody(). WithRequired(true). 
- WithJSONSchemaRef(lensConfigSchema) + WithJSONSchemaRef(setMigrationSchema) setMigration := openapi3.NewOperation() - setMigration.OperationID = "lens_set_migration" - setMigration.Description = "Add a new lens migration" + setMigration.OperationID = "lens_registry_set_migration" + setMigration.Description = "Add a new lens migration to registry" setMigration.Tags = []string{"lens"} setMigration.RequestBody = &openapi3.RequestBodyRef{ Value: setMigrationRequest, @@ -165,7 +133,7 @@ func (h *lensHandler) bindRoutes(router *Router) { setMigration.Responses.Set("400", errorResponse) reloadLenses := openapi3.NewOperation() - reloadLenses.OperationID = "lens_reload" + reloadLenses.OperationID = "lens_registry_reload" reloadLenses.Description = "Reload lens migrations" reloadLenses.Tags = []string{"lens"} reloadLenses.Responses = openapi3.NewResponses() @@ -176,24 +144,13 @@ func (h *lensHandler) bindRoutes(router *Router) { WithRequired(true). WithSchema(openapi3.NewStringSchema()) - hasMigration := openapi3.NewOperation() - hasMigration.OperationID = "lens_has_migration" - hasMigration.Description = "Check if a migration exists" - hasMigration.Tags = []string{"lens"} - hasMigration.AddParameter(versionPathParam) - hasMigration.Responses = openapi3.NewResponses() - hasMigration.Responses.Set("200", successResponse) - hasMigration.Responses.Set("400", errorResponse) - - migrateSchema := openapi3.NewArraySchema() - migrateSchema.Items = documentSchema migrateRequest := openapi3.NewRequestBody(). WithRequired(true). 
- WithContent(openapi3.NewContentWithJSONSchema(migrateSchema)) + WithJSONSchemaRef(migrateSchema) migrateUp := openapi3.NewOperation() - migrateUp.OperationID = "lens_migrate_up" - migrateUp.Description = "Migrate documents to a schema version" + migrateUp.OperationID = "lens_registry_migrate_up" + migrateUp.Description = "Migrate documents to a collection" migrateUp.Tags = []string{"lens"} migrateUp.RequestBody = &openapi3.RequestBodyRef{ Value: migrateRequest, @@ -204,8 +161,8 @@ func (h *lensHandler) bindRoutes(router *Router) { migrateUp.Responses.Set("400", errorResponse) migrateDown := openapi3.NewOperation() - migrateDown.OperationID = "lens_migrate_down" - migrateDown.Description = "Migrate documents from a schema version" + migrateDown.OperationID = "lens_registry_migrate_down" + migrateDown.Description = "Migrate documents from a collection" migrateDown.Tags = []string{"lens"} migrateDown.RequestBody = &openapi3.RequestBodyRef{ Value: migrateRequest, @@ -215,10 +172,8 @@ func (h *lensHandler) bindRoutes(router *Router) { migrateDown.Responses.Set("200", successResponse) migrateDown.Responses.Set("400", errorResponse) - router.AddRoute("/lens", http.MethodGet, lensConfig, h.Config) - router.AddRoute("/lens", http.MethodPost, setMigration, h.SetMigration) - router.AddRoute("/lens/reload", http.MethodPost, reloadLenses, h.ReloadLenses) - router.AddRoute("/lens/{version}", http.MethodGet, hasMigration, h.HasMigration) - router.AddRoute("/lens/{version}/up", http.MethodPost, migrateUp, h.MigrateUp) - router.AddRoute("/lens/{version}/down", http.MethodPost, migrateDown, h.MigrateDown) + router.AddRoute("/lens/registry", http.MethodPost, setMigration, h.SetMigration) + router.AddRoute("/lens/registry/reload", http.MethodPost, reloadLenses, h.ReloadLenses) + router.AddRoute("/lens/registry/{version}/up", http.MethodPost, migrateUp, h.MigrateUp) + router.AddRoute("/lens/registry/{version}/down", http.MethodPost, migrateDown, h.MigrateDown) } diff --git 
a/http/handler_store.go b/http/handler_store.go index 2a1ff97531..db7e8acc8d 100644 --- a/http/handler_store.go +++ b/http/handler_store.go @@ -16,6 +16,7 @@ import ( "fmt" "io" "net/http" + "strconv" "github.com/getkin/kin-openapi/openapi3" @@ -82,7 +83,7 @@ func (s *storeHandler) PatchSchema(rw http.ResponseWriter, req *http.Request) { return } - err = store.PatchSchema(req.Context(), message.Patch, message.SetAsDefaultVersion) + err = store.PatchSchema(req.Context(), message.Patch, message.Migration, message.SetAsDefaultVersion) if err != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return @@ -90,7 +91,7 @@ func (s *storeHandler) PatchSchema(rw http.ResponseWriter, req *http.Request) { rw.WriteHeader(http.StatusOK) } -func (s *storeHandler) SetDefaultSchemaVersion(rw http.ResponseWriter, req *http.Request) { +func (s *storeHandler) SetActiveSchemaVersion(rw http.ResponseWriter, req *http.Request) { store := req.Context().Value(storeContextKey).(client.Store) schemaVersionID, err := io.ReadAll(req.Body) @@ -98,7 +99,7 @@ func (s *storeHandler) SetDefaultSchemaVersion(rw http.ResponseWriter, req *http responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return } - err = store.SetDefaultSchemaVersion(req.Context(), string(schemaVersionID)) + err = store.SetActiveSchemaVersion(req.Context(), string(schemaVersionID)) if err != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return @@ -125,6 +126,23 @@ func (s *storeHandler) AddView(rw http.ResponseWriter, req *http.Request) { responseJSON(rw, http.StatusOK, defs) } +func (s *storeHandler) SetMigration(rw http.ResponseWriter, req *http.Request) { + store := req.Context().Value(storeContextKey).(client.Store) + + var cfg client.LensConfig + if err := requestJSON(req, &cfg); err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + + err := store.SetMigration(req.Context(), cfg) + if err != nil { + responseJSON(rw, http.StatusBadRequest, 
errorResponse{err}) + return + } + rw.WriteHeader(http.StatusOK) +} + func (s *storeHandler) GetCollection(rw http.ResponseWriter, req *http.Request) { store := req.Context().Value(storeContextKey).(client.Store) @@ -159,7 +177,17 @@ func (s *storeHandler) GetCollection(rw http.ResponseWriter, req *http.Request) } responseJSON(rw, http.StatusOK, colDesc) default: - cols, err := store.GetAllCollections(req.Context()) + var getInactive bool + if req.URL.Query().Has("get_inactive") { + getInactiveStr := req.URL.Query().Get("get_inactive") + var err error + getInactive, err = strconv.ParseBool(getInactiveStr) + if err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + } + cols, err := store.GetAllCollections(req.Context(), getInactive) if err != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return @@ -363,6 +391,9 @@ func (h *storeHandler) bindRoutes(router *Router) { addViewSchema := &openapi3.SchemaRef{ Ref: "#/components/schemas/add_view_request", } + lensConfigSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/lens_config", + } patchSchemaRequestSchema := &openapi3.SchemaRef{ Ref: "#/components/schemas/patch_schema_request", } @@ -450,6 +481,9 @@ func (h *storeHandler) bindRoutes(router *Router) { collectionVersionIdQueryParam := openapi3.NewQueryParameter("version_id"). WithDescription("Collection schema version id"). WithSchema(openapi3.NewStringSchema()) + collectionGetInactiveQueryParam := openapi3.NewQueryParameter("get_inactive"). + WithDescription("If true, inactive collections will be returned in addition to active ones"). 
+ WithSchema(openapi3.NewStringSchema()) collectionsSchema := openapi3.NewArraySchema() collectionsSchema.Items = collectionSchema @@ -471,6 +505,7 @@ func (h *storeHandler) bindRoutes(router *Router) { collectionDescribe.AddParameter(collectionNameQueryParam) collectionDescribe.AddParameter(collectionSchemaRootQueryParam) collectionDescribe.AddParameter(collectionVersionIdQueryParam) + collectionDescribe.AddParameter(collectionGetInactiveQueryParam) collectionDescribe.AddResponse(200, collectionsResponse) collectionDescribe.Responses.Set("400", errorResponse) @@ -501,6 +536,21 @@ func (h *storeHandler) bindRoutes(router *Router) { views.AddResponse(200, addViewResponse) views.Responses.Set("400", errorResponse) + setMigrationRequest := openapi3.NewRequestBody(). + WithRequired(true). + WithJSONSchemaRef(lensConfigSchema) + + setMigration := openapi3.NewOperation() + setMigration.OperationID = "lens_set_migration" + setMigration.Description = "Add a new lens migration" + setMigration.Tags = []string{"lens"} + setMigration.RequestBody = &openapi3.RequestBodyRef{ + Value: setMigrationRequest, + } + setMigration.Responses = openapi3.NewResponses() + setMigration.Responses.Set("200", successResponse) + setMigration.Responses.Set("400", errorResponse) + schemaNameQueryParam := openapi3.NewQueryParameter("name"). WithDescription("Schema name"). 
WithSchema(openapi3.NewStringSchema()) @@ -574,11 +624,12 @@ func (h *storeHandler) bindRoutes(router *Router) { router.AddRoute("/backup/import", http.MethodPost, backupImport, h.BasicImport) router.AddRoute("/collections", http.MethodGet, collectionDescribe, h.GetCollection) router.AddRoute("/view", http.MethodPost, views, h.AddView) router.AddRoute("/graphql", http.MethodGet, graphQLGet, h.ExecRequest) router.AddRoute("/graphql", http.MethodPost, graphQLPost, h.ExecRequest) router.AddRoute("/debug/dump", http.MethodGet, debugDump, h.PrintDump) router.AddRoute("/schema", http.MethodPost, addSchema, h.AddSchema) router.AddRoute("/schema", http.MethodPatch, patchSchema, h.PatchSchema) router.AddRoute("/schema", http.MethodGet, schemaDescribe, h.GetSchema) - router.AddRoute("/schema/default", http.MethodPost, setDefaultSchemaVersion, h.SetDefaultSchemaVersion) + router.AddRoute("/schema/default", http.MethodPost, setDefaultSchemaVersion, h.SetActiveSchemaVersion) + router.AddRoute("/lens", http.MethodPost, setMigration, h.SetMigration) } diff --git a/http/openapi.go b/http/openapi.go index fc10881f5b..12a832c704 100644 --- a/http/openapi.go +++ b/http/openapi.go @@ -40,6 +40,8 @@ var openApiSchemas = map[string]any{ "ccip_response": &CCIPResponse{}, "patch_schema_request": &patchSchemaRequest{}, "add_view_request": &addViewRequest{}, + "migrate_request": &migrateRequest{}, + "set_migration_request": &setMigrationRequest{}, } func NewOpenAPISpec() (*openapi3.T, error) { diff --git a/lens/fetcher.go b/lens/fetcher.go index 71f5b6243a..c60686dd1b 100644 --- a/lens/fetcher.go +++ b/lens/fetcher.go @@ -76,27 +76,21 @@ func (f *lensedFetcher) Init( f.fieldDescriptionsByName[field.Name] = field } - cfg, err := f.registry.Config(ctx) - if err != nil { - return err - } - - history, err := getTargetedSchemaHistory(ctx, txn, cfg, f.col.Schema().Root, f.col.Schema().VersionID) + history, err :=
getTargetedSchemaHistory(ctx, txn, f.col.Schema().Root, f.col.Schema().VersionID) if err != nil { return err } f.lens = new(ctx, f.registry, f.col.Schema().VersionID, history) f.txn = txn - for schemaVersionID := range history { - hasMigration, err := f.registry.HasMigration(ctx, schemaVersionID) - if err != nil { - return err - } - - if hasMigration { - f.hasMigrations = true - break +historyLoop: + for _, historyItem := range history { + sources := historyItem.collection.CollectionSources() + for _, source := range sources { + if source.Transform.HasValue() { + f.hasMigrations = true + break historyLoop + } } } @@ -283,9 +277,9 @@ func (f *lensedFetcher) updateDataStore(ctx context.Context, original map[string } datastoreKeyBase := core.DataStoreKey{ - CollectionID: f.col.Description().IDString(), - DocID: docID, - InstanceType: core.ValueKey, + CollectionRootID: f.col.Description().RootID, + DocID: docID, + InstanceType: core.ValueKey, } for fieldName, value := range modifiedFieldValuesByName { diff --git a/lens/history.go b/lens/history.go index 56b43a9d5b..a7a5ee57d8 100644 --- a/lens/history.go +++ b/lens/history.go @@ -13,27 +13,26 @@ package lens import ( "context" - "github.com/ipfs/go-datastore/query" "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/db/description" ) // schemaHistoryLink represents an item in a particular schema's history, it // links to the previous and next version items if they exist. type schemaHistoryLink struct { - // The schema version id of this history item. - schemaVersionID string + // The collection as this point in history. + collection *client.CollectionDescription - // The history link to the next schema version, if there is one - // (for the most recent schema version this will be None). 
- next immutable.Option[*schemaHistoryLink] + // The history link to the next schema versions, if there are some + // (for the most recent schema version this will be empty). + next []*schemaHistoryLink - // The history link to the previous schema version, if there is - // one (for the initial schema version this will be None). - previous immutable.Option[*schemaHistoryLink] + // The history link to the previous schema versions, if there are + // some (for the initial schema version this will be empty). + previous []*schemaHistoryLink } // targetedSchemaHistoryLink represents an item in a particular schema's history, it @@ -42,8 +41,8 @@ type schemaHistoryLink struct { // It also contains a vector which describes the distance and direction to the // target schema version (given as an input param on construction). type targetedSchemaHistoryLink struct { - // The schema version id of this history item. - schemaVersionID string + // The collection as this point in history. + collection *client.CollectionDescription // The link to next schema version, if there is one // (for the most recent schema version this will be None). 
@@ -69,11 +68,10 @@ type targetedSchemaHistoryLink struct { func getTargetedSchemaHistory( ctx context.Context, txn datastore.Txn, - lensConfigs []client.LensConfig, schemaRoot string, targetSchemaVersionID string, ) (map[schemaVersionID]*targetedSchemaHistoryLink, error) { - history, err := getSchemaHistory(ctx, txn, lensConfigs, schemaRoot) + history, err := getSchemaHistory(ctx, txn, schemaRoot) if err != nil { return nil, err } @@ -81,18 +79,22 @@ func getTargetedSchemaHistory( result := map[schemaVersionID]*targetedSchemaHistoryLink{} for _, item := range history { - result[item.schemaVersionID] = &targetedSchemaHistoryLink{ - schemaVersionID: item.schemaVersionID, + result[item.collection.SchemaVersionID] = &targetedSchemaHistoryLink{ + collection: item.collection, } } for _, item := range result { - schemaHistoryLink := history[item.schemaVersionID] - nextHistoryItem := schemaHistoryLink.next - if !nextHistoryItem.HasValue() { + schemaHistoryLink := history[item.collection.ID] + nextHistoryItems := schemaHistoryLink.next + if len(nextHistoryItems) == 0 { continue } - nextItem := result[nextHistoryItem.Value().schemaVersionID] + + // WARNING: This line assumes that each collection can only have a single source, and so + // just takes the first item. If/when collections can have multiple sources we will need to change + // this slightly. 
+ nextItem := result[nextHistoryItems[0].collection.SchemaVersionID] item.next = immutable.Some(nextItem) nextItem.previous = immutable.Some(item) } @@ -100,7 +102,7 @@ func getTargetedSchemaHistory( orphanSchemaVersions := map[string]struct{}{} for schemaVersion, item := range result { - if item.schemaVersionID == targetSchemaVersionID { + if item.collection.SchemaVersionID == targetSchemaVersionID { continue } if item.targetVector != 0 { @@ -122,7 +124,7 @@ func getTargetedSchemaHistory( wasFound = true break } - if currentItem.schemaVersionID == targetSchemaVersionID { + if currentItem.collection.SchemaVersionID == targetSchemaVersionID { wasFound = true break } @@ -143,7 +145,7 @@ func getTargetedSchemaHistory( wasFound = true break } - if currentItem.schemaVersionID == targetSchemaVersionID { + if currentItem.collection.SchemaVersionID == targetSchemaVersionID { wasFound = true break } @@ -169,11 +171,6 @@ func getTargetedSchemaHistory( return result, nil } -type schemaHistoryPairing struct { - schemaVersionID string - nextSchemaVersionID string -} - // getSchemaHistory returns the history of the schema of the given id as linked list // with each item mapped by schema version id. 
// @@ -182,96 +179,35 @@ type schemaHistoryPairing struct { func getSchemaHistory( ctx context.Context, txn datastore.Txn, - lensConfigs []client.LensConfig, schemaRoot string, -) (map[schemaVersionID]*schemaHistoryLink, error) { - pairings := map[string]*schemaHistoryPairing{} - - for _, config := range lensConfigs { - pairings[config.SourceSchemaVersionID] = &schemaHistoryPairing{ - schemaVersionID: config.SourceSchemaVersionID, - nextSchemaVersionID: config.DestinationSchemaVersionID, - } - - if _, ok := pairings[config.DestinationSchemaVersionID]; !ok { - pairings[config.DestinationSchemaVersionID] = &schemaHistoryPairing{ - schemaVersionID: config.DestinationSchemaVersionID, - } - } - } - - prefix := core.NewSchemaHistoryKey(schemaRoot, "") - q, err := txn.Systemstore().Query(ctx, query.Query{ - Prefix: prefix.ToString(), - }) +) (map[collectionID]*schemaHistoryLink, error) { + cols, err := description.GetCollectionsBySchemaRoot(ctx, txn, schemaRoot) if err != nil { return nil, err } - for res := range q.Next() { - // check for Done on context first - select { - case <-ctx.Done(): - // we've been cancelled! 
;) - return nil, q.Close() - default: - // noop, just continue on the with the for loop - } - - if res.Error != nil { - err = q.Close() - if err != nil { - return nil, err - } - return nil, res.Error - } + history := map[collectionID]*schemaHistoryLink{} - key, err := core.NewSchemaHistoryKeyFromString(res.Key) - if err != nil { - err = q.Close() - if err != nil { - return nil, err - } - return nil, err - } - - // The local schema version history takes priority over and migration-defined history - // and overwrites whatever already exists in the pairings (if any) - pairings[key.PreviousSchemaVersionID] = &schemaHistoryPairing{ - schemaVersionID: key.PreviousSchemaVersionID, - nextSchemaVersionID: string(res.Value), - } - - if _, ok := pairings[string(res.Value)]; !ok { - pairings[string(res.Value)] = &schemaHistoryPairing{ - schemaVersionID: string(res.Value), - } - } - } - - err = q.Close() - if err != nil { - return nil, err - } - - history := map[schemaVersionID]*schemaHistoryLink{} - - for _, pairing := range pairings { + for _, c := range cols { + col := c // Convert the temporary types to the cleaner return type: - history[pairing.schemaVersionID] = &schemaHistoryLink{ - schemaVersionID: pairing.schemaVersionID, + history[col.ID] = &schemaHistoryLink{ + collection: &col, } } - for _, pairing := range pairings { - src := history[pairing.schemaVersionID] - - // Use the internal pairings to set the next/previous links. This must be - // done after the `history` map has been fully populated, else `src` and - // `next` may not yet have been added to the map. 
- if next, hasNext := history[pairing.nextSchemaVersionID]; hasNext { - src.next = immutable.Some(next) - next.previous = immutable.Some(src) + for _, historyItem := range history { + for _, source := range historyItem.collection.CollectionSources() { + src := history[source.SourceCollectionID] + historyItem.previous = append( + historyItem.previous, + src, + ) + + src.next = append( + src.next, + historyItem, + ) } } diff --git a/lens/lens.go b/lens/lens.go index 86fcb0876f..4e700d7324 100644 --- a/lens/lens.go +++ b/lens/lens.go @@ -19,6 +19,7 @@ import ( ) type schemaVersionID = string +type collectionID = uint32 // LensDoc represents a document that will be sent to/from a Lens. type LensDoc = map[string]any @@ -151,10 +152,10 @@ func (l *lens) Next() (bool, error) { var pipeHead enumerable.Enumerable[LensDoc] for { - junctionPipe, junctionPreviouslyExisted := l.lensPipesBySchemaVersionIDs[historyLocation.schemaVersionID] + junctionPipe, junctionPreviouslyExisted := l.lensPipesBySchemaVersionIDs[historyLocation.collection.SchemaVersionID] if !junctionPreviouslyExisted { versionInputPipe := enumerable.NewQueue[LensDoc]() - l.lensInputPipesBySchemaVersionIDs[historyLocation.schemaVersionID] = versionInputPipe + l.lensInputPipesBySchemaVersionIDs[historyLocation.collection.SchemaVersionID] = versionInputPipe if inputPipe == nil { // The input pipe will be fed documents which are currently at this schema version inputPipe = versionInputPipe @@ -162,7 +163,7 @@ // It is a source of the schemaVersion junction pipe, other schema versions // may also join as sources to this junction pipe junctionPipe = enumerable.Concat[LensDoc](versionInputPipe) - l.lensPipesBySchemaVersionIDs[historyLocation.schemaVersionID] = junctionPipe + l.lensPipesBySchemaVersionIDs[historyLocation.collection.SchemaVersionID] = junctionPipe } // If we have previously laid pipe, we need to connect it to the current junction.
@@ -181,7 +182,7 @@ func (l *lens) Next() (bool, error) { // Aquire a lens migration from the registery, using the junctionPipe as its source. // The new pipeHead will then be connected as a source to the next migration-stage on // the next loop. - pipeHead, err = l.lensRegistry.MigrateUp(l.ctx, junctionPipe, historyLocation.schemaVersionID) + pipeHead, err = l.lensRegistry.MigrateUp(l.ctx, junctionPipe, historyLocation.next.Value().collection.ID) if err != nil { return false, err } @@ -191,7 +192,7 @@ func (l *lens) Next() (bool, error) { // Aquire a lens migration from the registery, using the junctionPipe as its source. // The new pipeHead will then be connected as a source to the next migration-stage on // the next loop. - pipeHead, err = l.lensRegistry.MigrateDown(l.ctx, junctionPipe, historyLocation.previous.Value().schemaVersionID) + pipeHead, err = l.lensRegistry.MigrateDown(l.ctx, junctionPipe, historyLocation.collection.ID) if err != nil { return false, err } diff --git a/lens/registry.go b/lens/registry.go index 20b125a498..ba24779611 100644 --- a/lens/registry.go +++ b/lens/registry.go @@ -12,10 +12,8 @@ package lens import ( "context" - "encoding/json" "sync" - "github.com/ipfs/go-datastore/query" "github.com/lens-vm/lens/host-go/config" "github.com/lens-vm/lens/host-go/config/model" "github.com/lens-vm/lens/host-go/engine/module" @@ -24,8 +22,8 @@ import ( "github.com/sourcenetwork/immutable/enumerable" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/db/description" "github.com/sourcenetwork/defradb/errors" ) @@ -46,13 +44,9 @@ type lensRegistry struct { modulesByPath map[string]module.Module moduleLock sync.Mutex - lensPoolsBySchemaVersionID map[string]*lensPool - reversedPoolsBySchemaVersionID map[string]*lensPool - poolLock sync.RWMutex - - // lens configurations by source schema version ID - configs map[string]client.LensConfig - 
configLock sync.RWMutex + lensPoolsByCollectionID map[uint32]*lensPool + reversedPoolsByCollectionID map[uint32]*lensPool + poolLock sync.RWMutex // Writable transaction contexts by transaction ID. // @@ -65,18 +59,16 @@ type lensRegistry struct { // stuff within here should be accessible from within this transaction but not // from outside. type txnContext struct { - txn datastore.Txn - lensPoolsBySchemaVersionID map[string]*lensPool - reversedPoolsBySchemaVersionID map[string]*lensPool - configs map[string]client.LensConfig + txn datastore.Txn + lensPoolsByCollectionID map[uint32]*lensPool + reversedPoolsByCollectionID map[uint32]*lensPool } func newTxnCtx(txn datastore.Txn) *txnContext { return &txnContext{ - txn: txn, - lensPoolsBySchemaVersionID: map[string]*lensPool{}, - reversedPoolsBySchemaVersionID: map[string]*lensPool{}, - configs: map[string]client.LensConfig{}, + txn: txn, + lensPoolsByCollectionID: map[uint32]*lensPool{}, + reversedPoolsByCollectionID: map[uint32]*lensPool{}, } } @@ -103,13 +95,12 @@ func NewRegistry(lensPoolSize immutable.Option[int], db TxnSource) client.LensRe return &implicitTxnLensRegistry{ db: db, registry: &lensRegistry{ - poolSize: size, - runtime: wasmtime.New(), - modulesByPath: map[string]module.Module{}, - lensPoolsBySchemaVersionID: map[string]*lensPool{}, - reversedPoolsBySchemaVersionID: map[string]*lensPool{}, - configs: map[string]client.LensConfig{}, - txnCtxs: map[uint64]*txnContext{}, + poolSize: size, + runtime: wasmtime.New(), + modulesByPath: map[string]module.Module{}, + lensPoolsByCollectionID: map[uint32]*lensPool{}, + reversedPoolsByCollectionID: map[uint32]*lensPool{}, + txnCtxs: map[uint64]*txnContext{}, }, } } @@ -133,20 +124,14 @@ func (r *lensRegistry) getCtx(txn datastore.Txn, readonly bool) *txnContext { txnCtx.txn.OnSuccess(func() { r.poolLock.Lock() - for schemaVersionID, locker := range txnCtx.lensPoolsBySchemaVersionID { - r.lensPoolsBySchemaVersionID[schemaVersionID] = locker + for collectionID, 
locker := range txnCtx.lensPoolsByCollectionID { + r.lensPoolsByCollectionID[collectionID] = locker } - for schemaVersionID, locker := range txnCtx.reversedPoolsBySchemaVersionID { - r.reversedPoolsBySchemaVersionID[schemaVersionID] = locker + for collectionID, locker := range txnCtx.reversedPoolsByCollectionID { + r.reversedPoolsByCollectionID[collectionID] = locker } r.poolLock.Unlock() - r.configLock.Lock() - for schemaVersionID, cfg := range txnCtx.configs { - r.configs[schemaVersionID] = cfg - } - r.configLock.Unlock() - r.txnLock.Lock() delete(r.txnCtxs, txn.ID()) r.txnLock.Unlock() @@ -169,28 +154,12 @@ func (r *lensRegistry) getCtx(txn datastore.Txn, readonly bool) *txnContext { return txnCtx } -func (r *lensRegistry) setMigration(ctx context.Context, txnCtx *txnContext, cfg client.LensConfig) error { - key := core.NewSchemaVersionMigrationKey(cfg.SourceSchemaVersionID) - - json, err := json.Marshal(cfg) - if err != nil { - return err - } - - err = txnCtx.txn.Systemstore().Put(ctx, key.ToDS(), json) - if err != nil { - return err - } - - err = r.cacheLens(txnCtx, cfg) - if err != nil { - return err - } - - return nil -} - -func (r *lensRegistry) cacheLens(txnCtx *txnContext, cfg client.LensConfig) error { +func (r *lensRegistry) setMigration( + ctx context.Context, + txnCtx *txnContext, + collectionID uint32, + cfg model.Lens, +) error { inversedModuleCfgs := make([]model.LensModule, len(cfg.Lenses)) for i, moduleCfg := range cfg.Lenses { // Reverse the order of the lenses for the inverse migration. 
@@ -204,19 +173,15 @@ func (r *lensRegistry) cacheLens(txnCtx *txnContext, cfg client.LensConfig) erro } } - reversedCfg := client.LensConfig{ - SourceSchemaVersionID: cfg.SourceSchemaVersionID, - DestinationSchemaVersionID: cfg.DestinationSchemaVersionID, - Lens: model.Lens{ - Lenses: inversedModuleCfgs, - }, + reversedCfg := model.Lens{ + Lenses: inversedModuleCfgs, } - err := r.cachePool(txnCtx.txn, txnCtx.lensPoolsBySchemaVersionID, cfg) + err := r.cachePool(txnCtx.txn, txnCtx.lensPoolsByCollectionID, cfg, collectionID) if err != nil { return err } - err = r.cachePool(txnCtx.txn, txnCtx.reversedPoolsBySchemaVersionID, reversedCfg) + err = r.cachePool(txnCtx.txn, txnCtx.reversedPoolsByCollectionID, reversedCfg, collectionID) // For now, checking this error is the best way of determining if a migration has an inverse. // Inverses are optional. //nolint:revive @@ -224,12 +189,15 @@ func (r *lensRegistry) cacheLens(txnCtx *txnContext, cfg client.LensConfig) erro return err } - txnCtx.configs[cfg.SourceSchemaVersionID] = cfg - return nil } -func (r *lensRegistry) cachePool(txn datastore.Txn, target map[string]*lensPool, cfg client.LensConfig) error { +func (r *lensRegistry) cachePool( + txn datastore.Txn, + target map[uint32]*lensPool, + cfg model.Lens, + collectionID uint32, +) error { pool := r.newPool(r.poolSize, cfg) for i := 0; i < r.poolSize; i++ { @@ -240,94 +208,64 @@ func (r *lensRegistry) cachePool(txn datastore.Txn, target map[string]*lensPool, pool.returnLens(lensPipe) } - target[cfg.SourceSchemaVersionID] = pool + target[collectionID] = pool return nil } func (r *lensRegistry) reloadLenses(ctx context.Context, txnCtx *txnContext) error { - prefix := core.NewSchemaVersionMigrationKey("") - q, err := txnCtx.txn.Systemstore().Query(ctx, query.Query{ - Prefix: prefix.ToString(), - }) + cols, err := description.GetCollections(ctx, txnCtx.txn) if err != nil { return err } - for res := range q.Next() { - // check for Done on context first - select { - case 
<-ctx.Done(): - // we've been cancelled! ;) - err = q.Close() - if err != nil { - return err - } - - return nil - default: - // noop, just continue on the with the for loop - } + for _, col := range cols { + sources := col.CollectionSources() - if res.Error != nil { - err = q.Close() - if err != nil { - return errors.Wrap(err.Error(), res.Error) - } - return res.Error + if len(sources) == 0 { + continue } - var cfg client.LensConfig - err = json.Unmarshal(res.Value, &cfg) - if err != nil { - err = q.Close() - if err != nil { - return err - } - return err + // WARNING: Here we are only dealing with the first source in the set, this is fine for now as + // currently collections can only have one source, however this code will need to change if/when + // collections support multiple sources. + + if !sources[0].Transform.HasValue() { + continue } - err = r.cacheLens(txnCtx, cfg) + err = r.setMigration(ctx, txnCtx, col.ID, sources[0].Transform.Value()) if err != nil { - err = q.Close() - if err != nil { - return errors.Wrap(err.Error(), res.Error) - } return err } } - err = q.Close() - if err != nil { - return err - } - return nil } func (r *lensRegistry) migrateUp( txnCtx *txnContext, src enumerable.Enumerable[LensDoc], - schemaVersionID string, + collectionID uint32, ) (enumerable.Enumerable[LensDoc], error) { - return r.migrate(r.lensPoolsBySchemaVersionID, txnCtx.lensPoolsBySchemaVersionID, src, schemaVersionID) + return r.migrate(r.lensPoolsByCollectionID, txnCtx.lensPoolsByCollectionID, src, collectionID) } func (r *lensRegistry) migrateDown( txnCtx *txnContext, src enumerable.Enumerable[LensDoc], - schemaVersionID string, + collectionID uint32, ) (enumerable.Enumerable[LensDoc], error) { - return r.migrate(r.reversedPoolsBySchemaVersionID, txnCtx.reversedPoolsBySchemaVersionID, src, schemaVersionID) + return r.migrate(r.reversedPoolsByCollectionID, txnCtx.reversedPoolsByCollectionID, src, collectionID) } func (r *lensRegistry) migrate( - pools 
map[string]*lensPool, - txnPools map[string]*lensPool, + pools map[uint32]*lensPool, + txnPools map[uint32]*lensPool, src enumerable.Enumerable[LensDoc], - schemaVersionID string, + collectionID uint32, ) (enumerable.Enumerable[LensDoc], error) { - lensPool, ok := r.getPool(pools, txnPools, schemaVersionID) + lensPool, ok := r.getPool(pools, txnPools, collectionID) if !ok { // If there are no migrations for this schema version, just return the given source. return src, nil @@ -343,44 +281,17 @@ func (r *lensRegistry) migrate( return lens, nil } -func (r *lensRegistry) config(txnCtx *txnContext) []client.LensConfig { - configs := map[string]client.LensConfig{} - r.configLock.RLock() - for schemaVersionID, cfg := range r.configs { - configs[schemaVersionID] = cfg - } - r.configLock.RUnlock() - - // If within a txn actively writing to this registry overwrite - // values from the (commited) registry. - // Note: Config cannot be removed, only replaced at the moment. - for schemaVersionID, cfg := range txnCtx.configs { - configs[schemaVersionID] = cfg - } - - result := []client.LensConfig{} - for _, cfg := range configs { - result = append(result, cfg) - } - return result -} - -func (r *lensRegistry) hasMigration(txnCtx *txnContext, schemaVersionID string) bool { - _, hasMigration := r.getPool(r.lensPoolsBySchemaVersionID, txnCtx.lensPoolsBySchemaVersionID, schemaVersionID) - return hasMigration -} - func (r *lensRegistry) getPool( - pools map[string]*lensPool, - txnPools map[string]*lensPool, - schemaVersionID string, + pools map[uint32]*lensPool, + txnPools map[uint32]*lensPool, + collectionID uint32, ) (*lensPool, bool) { - if pool, ok := txnPools[schemaVersionID]; ok { + if pool, ok := txnPools[collectionID]; ok { return pool, true } r.poolLock.RLock() - pool, ok := pools[schemaVersionID] + pool, ok := pools[collectionID] r.poolLock.RUnlock() return pool, ok } @@ -392,7 +303,7 @@ func (r *lensRegistry) getPool( // so we need to limit how frequently we do this. 
type lensPool struct { // The config used to create the lenses within this locker. - cfg client.LensConfig + cfg model.Lens registry *lensRegistry @@ -405,7 +316,7 @@ type lensPool struct { pipes chan *lensPipe } -func (r *lensRegistry) newPool(lensPoolSize int, cfg client.LensConfig) *lensPool { +func (r *lensRegistry) newPool(lensPoolSize int, cfg model.Lens) *lensPool { return &lensPool{ cfg: cfg, registry: r, @@ -472,11 +383,11 @@ type lensPipe struct { var _ enumerable.Socket[LensDoc] = (*lensPipe)(nil) -func (r *lensRegistry) newLensPipe(cfg client.LensConfig) (*lensPipe, error) { +func (r *lensRegistry) newLensPipe(cfg model.Lens) (*lensPipe, error) { socket := enumerable.NewSocket[LensDoc]() r.moduleLock.Lock() - enumerable, err := config.LoadInto[LensDoc, LensDoc](r.runtime, r.modulesByPath, cfg.Lens, socket) + enumerable, err := config.LoadInto[LensDoc, LensDoc](r.runtime, r.modulesByPath, cfg, socket) r.moduleLock.Unlock() if err != nil { diff --git a/lens/txn_registry.go b/lens/txn_registry.go index 954db01e0c..8093dedbdd 100644 --- a/lens/txn_registry.go +++ b/lens/txn_registry.go @@ -13,6 +13,7 @@ package lens import ( "context" + "github.com/lens-vm/lens/host-go/config/model" "github.com/sourcenetwork/immutable/enumerable" "github.com/sourcenetwork/defradb/client" @@ -46,7 +47,7 @@ func (r *explicitTxnLensRegistry) WithTxn(txn datastore.Txn) client.LensRegistry } } -func (r *implicitTxnLensRegistry) SetMigration(ctx context.Context, cfg client.LensConfig) error { +func (r *implicitTxnLensRegistry) SetMigration(ctx context.Context, collectionID uint32, cfg model.Lens) error { txn, err := r.db.NewTxn(ctx, false) if err != nil { return err @@ -54,7 +55,7 @@ func (r *implicitTxnLensRegistry) SetMigration(ctx context.Context, cfg client.L defer txn.Discard(ctx) txnCtx := r.registry.getCtx(txn, false) - err = r.registry.setMigration(ctx, txnCtx, cfg) + err = r.registry.setMigration(ctx, txnCtx, collectionID, cfg) if err != nil { return err } @@ -62,8 +63,8 
@@ func (r *implicitTxnLensRegistry) SetMigration(ctx context.Context, cfg client.L return txn.Commit(ctx) } -func (r *explicitTxnLensRegistry) SetMigration(ctx context.Context, cfg client.LensConfig) error { - return r.registry.setMigration(ctx, r.registry.getCtx(r.txn, false), cfg) +func (r *explicitTxnLensRegistry) SetMigration(ctx context.Context, collectionID uint32, cfg model.Lens) error { + return r.registry.setMigration(ctx, r.registry.getCtx(r.txn, false), collectionID, cfg) } func (r *implicitTxnLensRegistry) ReloadLenses(ctx context.Context) error { @@ -89,7 +90,7 @@ func (r *explicitTxnLensRegistry) ReloadLenses(ctx context.Context) error { func (r *implicitTxnLensRegistry) MigrateUp( ctx context.Context, src enumerable.Enumerable[LensDoc], - schemaVersionID string, + collectionID uint32, ) (enumerable.Enumerable[map[string]any], error) { txn, err := r.db.NewTxn(ctx, true) if err != nil { @@ -98,21 +99,21 @@ func (r *implicitTxnLensRegistry) MigrateUp( defer txn.Discard(ctx) txnCtx := newTxnCtx(txn) - return r.registry.migrateUp(txnCtx, src, schemaVersionID) + return r.registry.migrateUp(txnCtx, src, collectionID) } func (r *explicitTxnLensRegistry) MigrateUp( ctx context.Context, src enumerable.Enumerable[LensDoc], - schemaVersionID string, + collectionID uint32, ) (enumerable.Enumerable[map[string]any], error) { - return r.registry.migrateUp(r.registry.getCtx(r.txn, true), src, schemaVersionID) + return r.registry.migrateUp(r.registry.getCtx(r.txn, true), src, collectionID) } func (r *implicitTxnLensRegistry) MigrateDown( ctx context.Context, src enumerable.Enumerable[LensDoc], - schemaVersionID string, + collectionID uint32, ) (enumerable.Enumerable[map[string]any], error) { txn, err := r.db.NewTxn(ctx, true) if err != nil { @@ -121,43 +122,13 @@ func (r *implicitTxnLensRegistry) MigrateDown( defer txn.Discard(ctx) txnCtx := newTxnCtx(txn) - return r.registry.migrateDown(txnCtx, src, schemaVersionID) + return r.registry.migrateDown(txnCtx, src, 
collectionID) } func (r *explicitTxnLensRegistry) MigrateDown( ctx context.Context, src enumerable.Enumerable[LensDoc], - schemaVersionID string, + collectionID uint32, ) (enumerable.Enumerable[map[string]any], error) { - return r.registry.migrateDown(r.registry.getCtx(r.txn, true), src, schemaVersionID) -} - -func (r *implicitTxnLensRegistry) Config(ctx context.Context) ([]client.LensConfig, error) { - txn, err := r.db.NewTxn(ctx, true) - if err != nil { - return nil, err - } - defer txn.Discard(ctx) - txnCtx := newTxnCtx(txn) - - return r.registry.config(txnCtx), nil -} - -func (r *explicitTxnLensRegistry) Config(ctx context.Context) ([]client.LensConfig, error) { - return r.registry.config(r.registry.getCtx(r.txn, true)), nil -} - -func (r *implicitTxnLensRegistry) HasMigration(ctx context.Context, schemaVersionID string) (bool, error) { - txn, err := r.db.NewTxn(ctx, true) - if err != nil { - return false, err - } - defer txn.Discard(ctx) - txnCtx := newTxnCtx(txn) - - return r.registry.hasMigration(txnCtx, schemaVersionID), nil -} - -func (r *explicitTxnLensRegistry) HasMigration(ctx context.Context, schemaVersionID string) (bool, error) { - return r.registry.hasMigration(r.registry.getCtx(r.txn, true), schemaVersionID), nil + return r.registry.migrateDown(r.registry.getCtx(r.txn, true), src, collectionID) } diff --git a/net/peer_replicator.go b/net/peer_replicator.go index 8756959db8..287dc4cd48 100644 --- a/net/peer_replicator.go +++ b/net/peer_replicator.go @@ -53,7 +53,7 @@ func (p *Peer) SetReplicator(ctx context.Context, rep client.Replicator) error { default: // default to all collections - collections, err = p.db.WithTxn(txn).GetAllCollections(ctx) + collections, err = p.db.WithTxn(txn).GetAllCollections(ctx, false) if err != nil { return NewErrReplicatorCollections(err) } @@ -139,7 +139,7 @@ func (p *Peer) DeleteReplicator(ctx context.Context, rep client.Replicator) erro default: // default to all collections - collections, err = 
p.db.WithTxn(txn).GetAllCollections(ctx) + collections, err = p.db.WithTxn(txn).GetAllCollections(ctx, false) if err != nil { return NewErrReplicatorCollections(err) } diff --git a/net/server.go b/net/server.go index e93000d1b9..78781100b6 100644 --- a/net/server.go +++ b/net/server.go @@ -96,7 +96,7 @@ func newServer(p *Peer, db client.DB, opts ...grpc.DialOption) (*server, error) // Get all DocIDs across all collections in the DB log.Debug(p.ctx, "Getting all existing DocIDs...") - cols, err := s.db.GetAllCollections(s.peer.ctx) + cols, err := s.db.GetAllCollections(s.peer.ctx, false) if err != nil { return nil, err } @@ -269,7 +269,13 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL if len(cols) == 0 { return nil, client.NewErrCollectionNotFoundForSchema(schemaRoot) } - col := cols[0] + var col client.Collection + for _, c := range cols { + if col != nil && col.Name().HasValue() && !c.Name().HasValue() { + continue + } + col = c + } // Create a new DAG service with the current transaction var getter format.NodeGetter = s.peer.newDAGSyncerTxn(txn) diff --git a/net/server_test.go b/net/server_test.go index 5606dc3dc7..1f87e7f2fa 100644 --- a/net/server_test.go +++ b/net/server_test.go @@ -49,7 +49,7 @@ type mockDBColError struct { client.DB } -func (mDB *mockDBColError) GetAllCollections(context.Context) ([]client.Collection, error) { +func (mDB *mockDBColError) GetAllCollections(context.Context, bool) ([]client.Collection, error) { return nil, mockError } @@ -85,7 +85,7 @@ type mockDBDocIDsError struct { client.DB } -func (mDB *mockDBDocIDsError) GetAllCollections(context.Context) ([]client.Collection, error) { +func (mDB *mockDBDocIDsError) GetAllCollections(context.Context, bool) ([]client.Collection, error) { return []client.Collection{ &mockCollection{}, }, nil diff --git a/tests/clients/cli/wrapper.go b/tests/clients/cli/wrapper.go index 230230cfaf..86c13eff74 100644 --- a/tests/clients/cli/wrapper.go +++ 
b/tests/clients/cli/wrapper.go @@ -17,10 +17,13 @@ import ( "fmt" "io" "net/http/httptest" + "strconv" "strings" blockstore "github.com/ipfs/boxo/blockstore" + "github.com/lens-vm/lens/host-go/config/model" "github.com/libp2p/go-libp2p/core/peer" + "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/cli" "github.com/sourcenetwork/defradb/client" @@ -186,20 +189,29 @@ func (w *Wrapper) AddSchema(ctx context.Context, schema string) ([]client.Collec func (w *Wrapper) PatchSchema( ctx context.Context, patch string, + migration immutable.Option[model.Lens], setDefault bool, ) error { args := []string{"client", "schema", "patch"} if setDefault { - args = append(args, "--set-default") + args = append(args, "--set-active") } args = append(args, patch) + if migration.HasValue() { + lenses, err := json.Marshal(migration.Value()) + if err != nil { + return err + } + args = append(args, string(lenses)) + } + _, err := w.cmd.execute(ctx, args) return err } -func (w *Wrapper) SetDefaultSchemaVersion(ctx context.Context, schemaVersionID string) error { - args := []string{"client", "schema", "set-default"} +func (w *Wrapper) SetActiveSchemaVersion(ctx context.Context, schemaVersionID string) error { + args := []string{"client", "schema", "set-active"} args = append(args, schemaVersionID) _, err := w.cmd.execute(ctx, args) @@ -223,7 +235,18 @@ func (w *Wrapper) AddView(ctx context.Context, query string, sdl string) ([]clie } func (w *Wrapper) SetMigration(ctx context.Context, config client.LensConfig) error { - return w.LensRegistry().SetMigration(ctx, config) + args := []string{"client", "schema", "migration", "set"} + + lenses, err := json.Marshal(config.Lens) + if err != nil { + return err + } + args = append(args, config.SourceSchemaVersionID) + args = append(args, config.DestinationSchemaVersionID) + args = append(args, string(lenses)) + + _, err = w.cmd.execute(ctx, args) + return err } func (w *Wrapper) LensRegistry() client.LensRegistry { @@ -283,8 +306,9 
@@ func (w *Wrapper) GetCollectionsByVersionID(ctx context.Context, versionId strin return cols, err } -func (w *Wrapper) GetAllCollections(ctx context.Context) ([]client.Collection, error) { +func (w *Wrapper) GetAllCollections(ctx context.Context, getInactive bool) ([]client.Collection, error) { args := []string{"client", "collection", "describe"} + args = append(args, "--get-inactive", strconv.FormatBool(getInactive)) data, err := w.cmd.execute(ctx, args) if err != nil { diff --git a/tests/clients/cli/wrapper_lens.go b/tests/clients/cli/wrapper_lens.go index 679a792662..da6011b9eb 100644 --- a/tests/clients/cli/wrapper_lens.go +++ b/tests/clients/cli/wrapper_lens.go @@ -13,7 +13,10 @@ package cli import ( "context" "encoding/json" + "fmt" + "strconv" + "github.com/lens-vm/lens/host-go/config/model" "github.com/sourcenetwork/immutable/enumerable" "github.com/sourcenetwork/defradb/client" @@ -30,16 +33,15 @@ func (w *LensRegistry) WithTxn(tx datastore.Txn) client.LensRegistry { return &LensRegistry{w.cmd.withTxn(tx)} } -func (w *LensRegistry) SetMigration(ctx context.Context, config client.LensConfig) error { - args := []string{"client", "schema", "migration", "set"} - args = append(args, config.SourceSchemaVersionID) - args = append(args, config.DestinationSchemaVersionID) +func (w *LensRegistry) SetMigration(ctx context.Context, collectionID uint32, config model.Lens) error { + args := []string{"client", "schema", "migration", "set-registry"} - lensCfg, err := json.Marshal(config.Lens) + lenses, err := json.Marshal(config) if err != nil { return err } - args = append(args, string(lensCfg)) + args = append(args, strconv.FormatUint(uint64(collectionID), 10)) + args = append(args, string(lenses)) _, err = w.cmd.execute(ctx, args) return err @@ -55,10 +57,10 @@ func (w *LensRegistry) ReloadLenses(ctx context.Context) error { func (w *LensRegistry) MigrateUp( ctx context.Context, src enumerable.Enumerable[map[string]any], - schemaVersionID string, + collectionID 
uint32, ) (enumerable.Enumerable[map[string]any], error) { args := []string{"client", "schema", "migration", "up"} - args = append(args, "--version", schemaVersionID) + args = append(args, "--collection", fmt.Sprint(collectionID)) var srcData []map[string]any err := enumerable.ForEach(src, func(item map[string]any) { @@ -87,10 +89,10 @@ func (w *LensRegistry) MigrateUp( func (w *LensRegistry) MigrateDown( ctx context.Context, src enumerable.Enumerable[map[string]any], - schemaVersionID string, + collectionID uint32, ) (enumerable.Enumerable[map[string]any], error) { args := []string{"client", "schema", "migration", "down"} - args = append(args, "--version", schemaVersionID) + args = append(args, "--collection", fmt.Sprint(collectionID)) var srcData []map[string]any err := enumerable.ForEach(src, func(item map[string]any) { @@ -115,31 +117,3 @@ func (w *LensRegistry) MigrateDown( } return out, nil } - -func (w *LensRegistry) Config(ctx context.Context) ([]client.LensConfig, error) { - args := []string{"client", "schema", "migration", "get"} - - data, err := w.cmd.execute(ctx, args) - if err != nil { - return nil, err - } - var cfgs []client.LensConfig - if err := json.Unmarshal(data, &cfgs); err != nil { - return nil, err - } - return cfgs, nil -} - -func (w *LensRegistry) HasMigration(ctx context.Context, schemaVersionID string) (bool, error) { - cfgs, err := w.Config(ctx) - if err != nil { - return false, err - } - found := false - for _, cfg := range cfgs { - if cfg.SourceSchemaVersionID == schemaVersionID { - found = true - } - } - return found, nil -} diff --git a/tests/clients/http/wrapper.go b/tests/clients/http/wrapper.go index 7c65368445..770aab7b00 100644 --- a/tests/clients/http/wrapper.go +++ b/tests/clients/http/wrapper.go @@ -15,7 +15,9 @@ import ( "net/http/httptest" blockstore "github.com/ipfs/boxo/blockstore" + "github.com/lens-vm/lens/host-go/config/model" "github.com/libp2p/go-libp2p/core/peer" + "github.com/sourcenetwork/immutable" 
"github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" @@ -98,13 +100,14 @@ func (w *Wrapper) AddSchema(ctx context.Context, schema string) ([]client.Collec func (w *Wrapper) PatchSchema( ctx context.Context, patch string, + migration immutable.Option[model.Lens], setAsDefaultVersion bool, ) error { - return w.client.PatchSchema(ctx, patch, setAsDefaultVersion) + return w.client.PatchSchema(ctx, patch, migration, setAsDefaultVersion) } -func (w *Wrapper) SetDefaultSchemaVersion(ctx context.Context, schemaVersionID string) error { - return w.client.SetDefaultSchemaVersion(ctx, schemaVersionID) +func (w *Wrapper) SetActiveSchemaVersion(ctx context.Context, schemaVersionID string) error { + return w.client.SetActiveSchemaVersion(ctx, schemaVersionID) } func (w *Wrapper) AddView(ctx context.Context, query string, sdl string) ([]client.CollectionDefinition, error) { @@ -131,8 +134,8 @@ func (w *Wrapper) GetCollectionsByVersionID(ctx context.Context, versionId strin return w.client.GetCollectionsByVersionID(ctx, versionId) } -func (w *Wrapper) GetAllCollections(ctx context.Context) ([]client.Collection, error) { - return w.client.GetAllCollections(ctx) +func (w *Wrapper) GetAllCollections(ctx context.Context, getInactive bool) ([]client.Collection, error) { + return w.client.GetAllCollections(ctx, getInactive) } func (w *Wrapper) GetSchemasByName(ctx context.Context, name string) ([]client.SchemaDescription, error) { diff --git a/tests/gen/cli/gendocs.go b/tests/gen/cli/gendocs.go index 226d73bc97..f365180e3f 100644 --- a/tests/gen/cli/gendocs.go +++ b/tests/gen/cli/gendocs.go @@ -54,7 +54,7 @@ Example: The following command generates 100 User documents and 500 Device docum return NewErrInvalidDemandValue(err) } - collections, err := store.GetAllCollections(cmd.Context()) + collections, err := store.GetAllCollections(cmd.Context(), false) if err != nil { return err } diff --git a/tests/integration/lens.go b/tests/integration/lens.go index 
e69437d87b..69c49a1cbc 100644 --- a/tests/integration/lens.go +++ b/tests/integration/lens.go @@ -12,8 +12,6 @@ package tests import ( "github.com/sourcenetwork/immutable" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "github.com/sourcenetwork/defradb/client" ) @@ -39,19 +37,6 @@ type ConfigureMigration struct { ExpectedError string } -// GetMigrations is a test action which will fetch and assert on the results of calling -// `LensRegistry().Config()`. -type GetMigrations struct { - // NodeID is the node ID (index) of the node in which to configure the migration. - NodeID immutable.Option[int] - - // Used to identify the transaction for this to run against. Optional. - TransactionID immutable.Option[int] - - // The expected configuration. - ExpectedResults []client.LensConfig -} - func configureMigration( s *state, action ConfigureMigration, @@ -65,45 +50,3 @@ func configureMigration( assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised) } } - -func getMigrations( - s *state, - action GetMigrations, -) { - for _, node := range getNodes(action.NodeID, s.nodes) { - db := getStore(s, node, action.TransactionID, "") - - configs, err := db.LensRegistry().Config(s.ctx) - require.NoError(s.t, err) - require.Equal(s.t, len(configs), len(action.ExpectedResults)) - - // The order of the results is not deterministic, so do not assert on the element - for _, expected := range action.ExpectedResults { - var actual client.LensConfig - var actualFound bool - - for _, config := range configs { - if config.SourceSchemaVersionID != expected.SourceSchemaVersionID { - continue - } - if config.DestinationSchemaVersionID != expected.DestinationSchemaVersionID { - continue - } - actual = config - actualFound = true - } - - require.True(s.t, actualFound, "matching lens config not found") - require.Equal(s.t, len(expected.Lenses), len(actual.Lenses)) - - for j, actualLens := range actual.Lenses { - expectedLens := 
expected.Lenses[j] - - assert.Equal(s.t, expectedLens.Inverse, actualLens.Inverse) - assert.Equal(s.t, expectedLens.Path, actualLens.Path) - - assertResultsEqual(s.t, s.clientType, expectedLens.Arguments, actualLens.Arguments) - } - } - } -} diff --git a/tests/integration/schema/migrations/query/simple_test.go b/tests/integration/schema/migrations/query/simple_test.go index 6395aa0094..f00b76f230 100644 --- a/tests/integration/schema/migrations/query/simple_test.go +++ b/tests/integration/schema/migrations/query/simple_test.go @@ -466,8 +466,8 @@ func TestSchemaMigrationQueryMigratesAcrossMultipleVersionsBeforePatches(t *test }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreig3zt63qt7bkji47etyu2sqtzroa3tcfdxgwqc3ka2ijy63refq3a", - DestinationSchemaVersionID: "bafkreia4m6sn2rfypj2velvwpyude22fcb5jyfzum2eh3cdzg4a3myj5nu", + SourceSchemaVersionID: "bafkreibjb4h5nudsei7cq2kkontjinmjpbqls2tmowqp5nxougu4tuus4i", + DestinationSchemaVersionID: "bafkreih6o2jyurelxtpbg66gk23pio2tq6o3aed334z6w2u3qwve3at7ku", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -483,8 +483,8 @@ func TestSchemaMigrationQueryMigratesAcrossMultipleVersionsBeforePatches(t *test }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreia4m6sn2rfypj2velvwpyude22fcb5jyfzum2eh3cdzg4a3myj5nu", - DestinationSchemaVersionID: "bafkreifiai7ukztmfavmicyq6hummnwpueq475ddn6m5wsbjhhpjtp6fcy", + SourceSchemaVersionID: "bafkreih6o2jyurelxtpbg66gk23pio2tq6o3aed334z6w2u3qwve3at7ku", + DestinationSchemaVersionID: "bafkreihv4ktjwzyhhkmas5iz4q7cawet4aeurqci33i66wr225l5pet4qu", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -534,6 +534,93 @@ func TestSchemaMigrationQueryMigratesAcrossMultipleVersionsBeforePatches(t *test testUtils.ExecuteTestCase(t, test) } +func TestSchemaMigrationQueryMigratesAcrossMultipleVersionsBeforePatchesWrongOrder(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema migration, multiple 
migrations before patch", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "John" + }`, + }, + testUtils.ConfigureMigration{ + // Declare the migration from v2=>v3 before declaring the migration from v1=>v2 + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "bafkreih6o2jyurelxtpbg66gk23pio2tq6o3aed334z6w2u3qwve3at7ku", + DestinationSchemaVersionID: "bafkreihv4ktjwzyhhkmas5iz4q7cawet4aeurqci33i66wr225l5pet4qu", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "email", + "value": "ilovewasm@source.com", + }, + }, + }, + }, + }, + }, + testUtils.ConfigureMigration{ + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "bafkreibjb4h5nudsei7cq2kkontjinmjpbqls2tmowqp5nxougu4tuus4i", + DestinationSchemaVersionID: "bafkreih6o2jyurelxtpbg66gk23pio2tq6o3aed334z6w2u3qwve3at7ku", + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": true, + }, + }, + }, + }, + }, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + ] + `, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": "String"} } + ] + `, + }, + testUtils.Request{ + Request: `query { + Users { + name + verified + email + } + }`, + Results: []map[string]any{ + { + "name": "John", + "verified": true, + "email": "ilovewasm@source.com", + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + // This test is important as it tests that orphan migrations do not block the fetcher(s) // from functioning. 
// @@ -750,7 +837,7 @@ func TestSchemaMigrationQueryMigrationRemovesExistingField(t *testing.T) { testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ SourceSchemaVersionID: "bafkreihh4zkyuqk4ibwb5utyayvbw75hdfkueg3scm7taysk3rbh2jhaee", - DestinationSchemaVersionID: "bafkreigamaevrkcknutb275x3uxpgc2sn73qsfvkjqli7fiqaxfnniunjy", + DestinationSchemaVersionID: "bafkreifzcnwsq2os36utxnqpmq74wjt7o2czkcjo6exzv4fhm3ni2ounxe", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -811,7 +898,7 @@ func TestSchemaMigrationQueryMigrationPreservesExistingFieldWhenFieldNotRequeste testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ SourceSchemaVersionID: "bafkreihh4zkyuqk4ibwb5utyayvbw75hdfkueg3scm7taysk3rbh2jhaee", - DestinationSchemaVersionID: "bafkreigamaevrkcknutb275x3uxpgc2sn73qsfvkjqli7fiqaxfnniunjy", + DestinationSchemaVersionID: "bafkreifzcnwsq2os36utxnqpmq74wjt7o2czkcjo6exzv4fhm3ni2ounxe", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -885,7 +972,7 @@ func TestSchemaMigrationQueryMigrationCopiesExistingFieldWhenSrcFieldNotRequeste testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ SourceSchemaVersionID: "bafkreihh4zkyuqk4ibwb5utyayvbw75hdfkueg3scm7taysk3rbh2jhaee", - DestinationSchemaVersionID: "bafkreigamaevrkcknutb275x3uxpgc2sn73qsfvkjqli7fiqaxfnniunjy", + DestinationSchemaVersionID: "bafkreica72aah4lm4sry67eqxqufsr24to6abgocomra4qeokwa7oaazwi", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -947,7 +1034,7 @@ func TestSchemaMigrationQueryMigrationCopiesExistingFieldWhenSrcAndDstFieldNotRe testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ SourceSchemaVersionID: "bafkreihh4zkyuqk4ibwb5utyayvbw75hdfkueg3scm7taysk3rbh2jhaee", - DestinationSchemaVersionID: "bafkreigamaevrkcknutb275x3uxpgc2sn73qsfvkjqli7fiqaxfnniunjy", + DestinationSchemaVersionID: "bafkreica72aah4lm4sry67eqxqufsr24to6abgocomra4qeokwa7oaazwi", Lens: model.Lens{ Lenses: []model.LensModule{ { diff --git a/tests/integration/schema/migrations/query/with_p2p_test.go 
b/tests/integration/schema/migrations/query/with_p2p_test.go index 9f3e068ca0..a56e2defa0 100644 --- a/tests/integration/schema/migrations/query/with_p2p_test.go +++ b/tests/integration/schema/migrations/query/with_p2p_test.go @@ -145,8 +145,8 @@ func TestSchemaMigrationQueryWithP2PReplicatedDocAtMuchOlderSchemaVersion(t *tes testUtils.ConfigureMigration{ // Register the migration on both nodes. LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreibgg4ex7aya4w4x3dnrlyov4juyuffjjokzkjrpoupncfuvsyi6du", - DestinationSchemaVersionID: "bafkreidvp3xozpau2zanh7s5or4fhr7kchm6klznsyzd7fpcm3sh2xlgfm", + SourceSchemaVersionID: "bafkreiadnck34zzbwayjw3aeubw7eg4jmgtwoibu35tkxbjpar5rzxkdpu", + DestinationSchemaVersionID: "bafkreibzqyjmyjs7vyo2q4h2tv5rbdbe4lv7tjbl5esilmobhgclia2juy", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -163,8 +163,8 @@ func TestSchemaMigrationQueryWithP2PReplicatedDocAtMuchOlderSchemaVersion(t *tes testUtils.ConfigureMigration{ // Register the migration on both nodes. 
LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreidvp3xozpau2zanh7s5or4fhr7kchm6klznsyzd7fpcm3sh2xlgfm", - DestinationSchemaVersionID: "bafkreib7x3ifkcrp6ddr22vatirkcrotx6feombf3n7q2fnxbj5bvcl3cy", + SourceSchemaVersionID: "bafkreibzqyjmyjs7vyo2q4h2tv5rbdbe4lv7tjbl5esilmobhgclia2juy", + DestinationSchemaVersionID: "bafkreicvjzscbtmuff7m7swfmmunnclp66ky4sxhfixacq2yuvion5s5ti", Lens: model.Lens{ Lenses: []model.LensModule{ { diff --git a/tests/integration/schema/migrations/query/with_restart_test.go b/tests/integration/schema/migrations/query/with_restart_test.go index 793d0116f0..dcbd88553e 100644 --- a/tests/integration/schema/migrations/query/with_restart_test.go +++ b/tests/integration/schema/migrations/query/with_restart_test.go @@ -99,8 +99,8 @@ func TestSchemaMigrationQueryWithRestartAndMigrationBeforeSchemaPatch(t *testing }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreig3zt63qt7bkji47etyu2sqtzroa3tcfdxgwqc3ka2ijy63refq3a", - DestinationSchemaVersionID: "bafkreia4m6sn2rfypj2velvwpyude22fcb5jyfzum2eh3cdzg4a3myj5nu", + SourceSchemaVersionID: "bafkreibjb4h5nudsei7cq2kkontjinmjpbqls2tmowqp5nxougu4tuus4i", + DestinationSchemaVersionID: "bafkreih6o2jyurelxtpbg66gk23pio2tq6o3aed334z6w2u3qwve3at7ku", Lens: model.Lens{ Lenses: []model.LensModule{ { diff --git a/tests/integration/schema/migrations/query/with_set_default_test.go b/tests/integration/schema/migrations/query/with_set_default_test.go index cd272fc860..c947de28c1 100644 --- a/tests/integration/schema/migrations/query/with_set_default_test.go +++ b/tests/integration/schema/migrations/query/with_set_default_test.go @@ -47,25 +47,19 @@ func TestSchemaMigrationQuery_WithSetDefaultToLatest_AppliesForwardMigration(t * ] `, SetAsDefaultVersion: immutable.Some(false), - }, - testUtils.ConfigureMigration{ - LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreiadnck34zzbwayjw3aeubw7eg4jmgtwoibu35tkxbjpar5rzxkdpu", - DestinationSchemaVersionID: 
schemaVersionID2, - Lens: model.Lens{ - Lenses: []model.LensModule{ - { - Path: lenses.SetDefaultModulePath, - Arguments: map[string]any{ - "dst": "verified", - "value": true, - }, + Lens: immutable.Some(model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": true, }, }, }, - }, + }), }, - testUtils.SetDefaultSchemaVersion{ + testUtils.SetActiveSchemaVersion{ SchemaVersionID: schemaVersionID2, }, testUtils.Request{ @@ -111,7 +105,7 @@ func TestSchemaMigrationQuery_WithSetDefaultToOriginal_AppliesInverseMigration(t `, SetAsDefaultVersion: immutable.Some(false), }, - testUtils.SetDefaultSchemaVersion{ + testUtils.SetActiveSchemaVersion{ SchemaVersionID: schemaVersionID2, }, // Create John using the new schema version @@ -139,7 +133,7 @@ func TestSchemaMigrationQuery_WithSetDefaultToOriginal_AppliesInverseMigration(t }, }, // Set the schema version back to the original - testUtils.SetDefaultSchemaVersion{ + testUtils.SetActiveSchemaVersion{ SchemaVersionID: schemaVersionID1, }, testUtils.Request{ @@ -211,7 +205,7 @@ func TestSchemaMigrationQuery_WithSetDefaultToOriginalVersionThatDocWasCreatedAt }, }, // Set the schema version back to the original - testUtils.SetDefaultSchemaVersion{ + testUtils.SetActiveSchemaVersion{ SchemaVersionID: schemaVersionID1, }, testUtils.Request{ diff --git a/tests/integration/schema/migrations/simple_test.go b/tests/integration/schema/migrations/simple_test.go index 172b3d3503..66d34f9e8d 100644 --- a/tests/integration/schema/migrations/simple_test.go +++ b/tests/integration/schema/migrations/simple_test.go @@ -14,6 +14,7 @@ import ( "testing" "github.com/lens-vm/lens/host-go/config/model" + "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" testUtils "github.com/sourcenetwork/defradb/tests/integration" @@ -43,20 +44,32 @@ func TestSchemaMigrationDoesNotErrorGivenUnknownSchemaRoots(t *testing.T) { }, }, }, - 
testUtils.GetMigrations{ - ExpectedResults: []client.LensConfig{ + testUtils.GetCollections{ + GetInactive: true, + ExpectedResults: []client.CollectionDescription{ { - SourceSchemaVersionID: "does not exist", - DestinationSchemaVersionID: "also does not exist", - Lens: model.Lens{ - Lenses: []model.LensModule{ - { - Path: lenses.SetDefaultModulePath, - Arguments: map[string]any{ - "dst": "verified", - "value": false, + ID: 1, + SchemaVersionID: "does not exist", + }, + { + ID: 2, + SchemaVersionID: "also does not exist", + Sources: []any{ + &client.CollectionSource{ + SourceCollectionID: 1, + Transform: immutable.Some( + model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": false, + }, + }, + }, }, - }, + ), }, }, }, @@ -106,35 +119,58 @@ func TestSchemaMigrationGetMigrationsReturnsMultiple(t *testing.T) { }, }, }, - testUtils.GetMigrations{ - ExpectedResults: []client.LensConfig{ + testUtils.GetCollections{ + GetInactive: true, + ExpectedResults: []client.CollectionDescription{ + { + ID: 1, + SchemaVersionID: "does not exist", + }, { - SourceSchemaVersionID: "does not exist", - DestinationSchemaVersionID: "also does not exist", - Lens: model.Lens{ - Lenses: []model.LensModule{ - { - Path: lenses.SetDefaultModulePath, - Arguments: map[string]any{ - "dst": "verified", - "value": false, + ID: 2, + SchemaVersionID: "also does not exist", + Sources: []any{ + &client.CollectionSource{ + SourceCollectionID: 1, + Transform: immutable.Some( + model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": false, + }, + }, + }, }, - }, + ), }, }, }, { - SourceSchemaVersionID: "bafkreibjb4h5nudsei7cq2kkontjinmjpbqls2tmowqp5nxougu4tuus4i", - DestinationSchemaVersionID: "bafkreih6o2jyurelxtpbg66gk23pio2tq6o3aed334z6w2u3qwve3at7ku", - Lens: model.Lens{ - Lenses: []model.LensModule{ - { - Path: 
lenses.SetDefaultModulePath, - Arguments: map[string]any{ - "dst": "verified", - "value": true, + ID: 3, + SchemaVersionID: "bafkreibjb4h5nudsei7cq2kkontjinmjpbqls2tmowqp5nxougu4tuus4i", + }, + { + ID: 4, + SchemaVersionID: "bafkreih6o2jyurelxtpbg66gk23pio2tq6o3aed334z6w2u3qwve3at7ku", + Sources: []any{ + &client.CollectionSource{ + SourceCollectionID: 3, + Transform: immutable.Some( + model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": true, + }, + }, + }, }, - }, + ), }, }, }, @@ -185,20 +221,54 @@ func TestSchemaMigrationReplacesExistingMigationBasedOnSourceID(t *testing.T) { }, }, }, - testUtils.GetMigrations{ - ExpectedResults: []client.LensConfig{ + testUtils.GetCollections{ + GetInactive: true, + ExpectedResults: []client.CollectionDescription{ { - SourceSchemaVersionID: "a", - DestinationSchemaVersionID: "c", - Lens: model.Lens{ - Lenses: []model.LensModule{ - { - Path: lenses.SetDefaultModulePath, - Arguments: map[string]any{ - "dst": "age", - "value": 123, + ID: 1, + SchemaVersionID: "a", + }, + { + ID: 2, + SchemaVersionID: "b", + Sources: []any{ + &client.CollectionSource{ + SourceCollectionID: 1, + Transform: immutable.Some( + model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": false, + }, + }, + }, }, - }, + ), + }, + }, + }, + { + ID: 3, + SchemaVersionID: "c", + Sources: []any{ + &client.CollectionSource{ + SourceCollectionID: 1, + Transform: immutable.Some( + model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "age", + "value": float64(123), + }, + }, + }, + }, + ), }, }, }, diff --git a/tests/integration/schema/migrations/with_txn_test.go b/tests/integration/schema/migrations/with_txn_test.go index 827f40de5e..217c8c0a74 100644 --- a/tests/integration/schema/migrations/with_txn_test.go +++ 
b/tests/integration/schema/migrations/with_txn_test.go @@ -43,23 +43,33 @@ func TestSchemaMigrationGetMigrationsWithTxn(t *testing.T) { }, }, }, - testUtils.GetMigrations{ + testUtils.GetCollections{ TransactionID: immutable.Some(0), - // This is the bug - although the GetMigrations call and migration are on the same transaction - // the migration is not returned in the results. - ExpectedResults: []client.LensConfig{ + GetInactive: true, + ExpectedResults: []client.CollectionDescription{ { - SourceSchemaVersionID: "does not exist", - DestinationSchemaVersionID: "also does not exist", - Lens: model.Lens{ - Lenses: []model.LensModule{ - { - Path: lenses.SetDefaultModulePath, - Arguments: map[string]any{ - "dst": "verified", - "value": false, + ID: 1, + SchemaVersionID: "does not exist", + }, + { + ID: 2, + SchemaVersionID: "also does not exist", + Sources: []any{ + &client.CollectionSource{ + SourceCollectionID: 1, + Transform: immutable.Some( + model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": false, + }, + }, + }, }, - }, + ), }, }, }, diff --git a/tests/integration/schema/updates/with_schema_branch_test.go b/tests/integration/schema/updates/with_schema_branch_test.go new file mode 100644 index 0000000000..5d1deacfb9 --- /dev/null +++ b/tests/integration/schema/updates/with_schema_branch_test.go @@ -0,0 +1,547 @@ +// Copyright 2024 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package updates + +import ( + "testing" + + "github.com/sourcenetwork/immutable" + + "github.com/sourcenetwork/defradb/client" + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestSchemaUpdates_WithBranchingSchema(t *testing.T) { + schemaVersion1ID := "bafkreibjb4h5nudsei7cq2kkontjinmjpbqls2tmowqp5nxougu4tuus4i" + schemaVersion2ID := "bafkreibzozorw6lqjn5bjogsqxeqcswoqedcatdvphhts4frd7mb4jn7x4" + schemaVersion3ID := "bafkreiahizg44dgnuniim3y75ztjtj67kkezkit7w445lfpirx6iq6ixg4" + + test := testUtils.TestCase{ + Description: "Test schema update, with branching schema", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.SchemaPatch{ + // The second schema version will not be set as the active version, leaving the initial version active + SetAsDefaultVersion: immutable.Some(false), + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } + ] + `, + }, + testUtils.SchemaPatch{ + // The third schema version will be set as the active version, going from version 1 to 3 + SetAsDefaultVersion: immutable.Some(true), + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "phone", "Kind": 11} } + ] + `, + }, + testUtils.Request{ + Request: `query { + Users { + name + email + } + }`, + // The email field is not queriable + ExpectedError: `Cannot query field "email" on type "Users".`, + }, + testUtils.GetSchema{ + // The second schema version is present in the system, with the email field + VersionID: immutable.Some(schemaVersion2ID), + ExpectedResults: []client.SchemaDescription{ + { + Name: "Users", + VersionID: schemaVersion2ID, + Root: schemaVersion1ID, + Fields: []client.FieldDescription{ + { + Name: "_docID", + Kind: client.FieldKind_DocID, + Typ: client.LWW_REGISTER, + }, + { + Name: "name", + ID: 1, + Kind: client.FieldKind_NILLABLE_STRING, + Typ: client.LWW_REGISTER, + }, + { + Name: "email", + ID: 2, + Kind: 
client.FieldKind_NILLABLE_STRING, + Typ: client.LWW_REGISTER, + }, + }, + }, + }, + }, + testUtils.Request{ + // The phone field is queriable + Request: `query { + Users { + name + phone + } + }`, + Results: []map[string]any{}, + }, + testUtils.GetSchema{ + // The third schema version is present in the system, with the phone field + VersionID: immutable.Some(schemaVersion3ID), + ExpectedResults: []client.SchemaDescription{ + { + Name: "Users", + VersionID: schemaVersion3ID, + Root: schemaVersion1ID, + Fields: []client.FieldDescription{ + { + Name: "_docID", + Kind: client.FieldKind_DocID, + Typ: client.LWW_REGISTER, + }, + { + Name: "name", + ID: 1, + Kind: client.FieldKind_NILLABLE_STRING, + Typ: client.LWW_REGISTER, + }, + { + Name: "phone", + ID: 2, + Kind: client.FieldKind_NILLABLE_STRING, + Typ: client.LWW_REGISTER, + }, + }, + }, + }, + }, + testUtils.GetCollections{ + GetInactive: true, + ExpectedResults: []client.CollectionDescription{ + { + // The original collection version is present, it has no source and is inactive (has no name). + ID: 1, + SchemaVersionID: schemaVersion1ID, + }, + { + // The collection version for schema version 2 is present, it has the first collection as a source + // and is inactive. + ID: 2, + SchemaVersionID: schemaVersion2ID, + Sources: []any{ + &client.CollectionSource{ + SourceCollectionID: 1, + }, + }, + }, + { + // The collection version for schema version 3 is present and is active, it also has the first collection + // as source. 
+ ID: 3, + Name: immutable.Some("Users"), + SchemaVersionID: schemaVersion3ID, + Sources: []any{ + &client.CollectionSource{ + SourceCollectionID: 1, + }, + }, + }, + }, + }, + }, + } + testUtils.ExecuteTestCase(t, test) +} + +func TestSchemaUpdates_WithPatchOnBranchedSchema(t *testing.T) { + schemaVersion1ID := "bafkreibjb4h5nudsei7cq2kkontjinmjpbqls2tmowqp5nxougu4tuus4i" + schemaVersion2ID := "bafkreibzozorw6lqjn5bjogsqxeqcswoqedcatdvphhts4frd7mb4jn7x4" + schemaVersion3ID := "bafkreiahizg44dgnuniim3y75ztjtj67kkezkit7w445lfpirx6iq6ixg4" + schemaVersion4ID := "bafkreig2b545qyt3luwmt37uyofbka2flmbc3kkhoifsh7mv2rgqy7fgty" + + test := testUtils.TestCase{ + Description: "Test schema update, with patch on branching schema", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.SchemaPatch{ + // The second schema version will not be set as the active version, leaving the initial version active + SetAsDefaultVersion: immutable.Some(false), + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } + ] + `, + }, + testUtils.SchemaPatch{ + // The third schema version will be set as the active version, going from version 1 to 3 + SetAsDefaultVersion: immutable.Some(true), + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "phone", "Kind": 11} } + ] + `, + }, + testUtils.SchemaPatch{ + // The fourth schema version will be set as the active version, going from version 3 to 4 + SetAsDefaultVersion: immutable.Some(true), + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "discordName", "Kind": 11} } + ] + `, + }, + testUtils.Request{ + // The phone and discordName fields are queriable + Request: `query { + Users { + name + phone + discordName + } + }`, + Results: []map[string]any{}, + }, + testUtils.GetSchema{ + // The fourth schema version is present in the system, with the phone and discordName field + VersionID: 
immutable.Some(schemaVersion4ID), + ExpectedResults: []client.SchemaDescription{ + { + Name: "Users", + VersionID: schemaVersion4ID, + Root: schemaVersion1ID, + Fields: []client.FieldDescription{ + { + Name: "_docID", + Kind: client.FieldKind_DocID, + Typ: client.LWW_REGISTER, + }, + { + Name: "name", + ID: 1, + Kind: client.FieldKind_NILLABLE_STRING, + Typ: client.LWW_REGISTER, + }, + { + Name: "phone", + ID: 2, + Kind: client.FieldKind_NILLABLE_STRING, + Typ: client.LWW_REGISTER, + }, + { + Name: "discordName", + ID: 3, + Kind: client.FieldKind_NILLABLE_STRING, + Typ: client.LWW_REGISTER, + }, + }, + }, + }, + }, + testUtils.GetCollections{ + GetInactive: true, + ExpectedResults: []client.CollectionDescription{ + { + // The original collection version is present, it has no source and is inactive (has no name). + ID: 1, + SchemaVersionID: schemaVersion1ID, + }, + { + // The collection version for schema version 2 is present, it has the first collection as a source + // and is inactive. + ID: 2, + SchemaVersionID: schemaVersion2ID, + Sources: []any{ + &client.CollectionSource{ + SourceCollectionID: 1, + }, + }, + }, + { + // The collection version for schema version 3 is present and inactive, it has the first collection + // as source. + ID: 3, + SchemaVersionID: schemaVersion3ID, + Sources: []any{ + &client.CollectionSource{ + SourceCollectionID: 1, + }, + }, + }, + { + // The collection version for schema version 4 is present and is active, it also has the third collection + // as source. 
+ ID: 4, + Name: immutable.Some("Users"), + SchemaVersionID: schemaVersion4ID, + Sources: []any{ + &client.CollectionSource{ + SourceCollectionID: 3, + }, + }, + }, + }, + }, + }, + } + testUtils.ExecuteTestCase(t, test) +} + +func TestSchemaUpdates_WithBranchingSchemaAndSetActiveSchemaToOtherBranch(t *testing.T) { + schemaVersion1ID := "bafkreibjb4h5nudsei7cq2kkontjinmjpbqls2tmowqp5nxougu4tuus4i" + schemaVersion2ID := "bafkreibzozorw6lqjn5bjogsqxeqcswoqedcatdvphhts4frd7mb4jn7x4" + schemaVersion3ID := "bafkreiahizg44dgnuniim3y75ztjtj67kkezkit7w445lfpirx6iq6ixg4" + + test := testUtils.TestCase{ + Description: "Test schema update, with branching schema toggling between branches", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.SchemaPatch{ + // The second schema version will not be set as the active version, leaving the initial version active + SetAsDefaultVersion: immutable.Some(false), + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } + ] + `, + }, + testUtils.SchemaPatch{ + // The third schema version will be set as the active version, going from version 1 to 3 + SetAsDefaultVersion: immutable.Some(true), + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "phone", "Kind": 11} } + ] + `, + }, + testUtils.SetActiveSchemaVersion{ + // Set the second schema version to be active + SchemaVersionID: schemaVersion2ID, + }, + testUtils.Request{ + Request: `query { + Users { + name + email + } + }`, + // The email field is queriable + Results: []map[string]any{}, + }, + testUtils.Request{ + Request: `query { + Users { + name + phone + } + }`, + // The phone field is not queriable + ExpectedError: `Cannot query field "phone" on type "Users".`, + }, + testUtils.GetCollections{ + GetInactive: true, + ExpectedResults: []client.CollectionDescription{ + { + // The original collection version is present, it has no source and is inactive (has 
no name). + ID: 1, + SchemaVersionID: schemaVersion1ID, + }, + { + // The collection version for schema version 2 is present and is active, it has the first collection as a source + ID: 2, + Name: immutable.Some("Users"), + SchemaVersionID: schemaVersion2ID, + Sources: []any{ + &client.CollectionSource{ + SourceCollectionID: 1, + }, + }, + }, + { + // The collection version for schema version 3 is present and is inactive, it also has the first collection + // as source. + ID: 3, + SchemaVersionID: schemaVersion3ID, + Sources: []any{ + &client.CollectionSource{ + SourceCollectionID: 1, + }, + }, + }, + }, + }, + }, + } + testUtils.ExecuteTestCase(t, test) +} + +func TestSchemaUpdates_WithBranchingSchemaAndSetActiveSchemaToOtherBranchThenPatch(t *testing.T) { + schemaVersion1ID := "bafkreibjb4h5nudsei7cq2kkontjinmjpbqls2tmowqp5nxougu4tuus4i" + schemaVersion2ID := "bafkreibzozorw6lqjn5bjogsqxeqcswoqedcatdvphhts4frd7mb4jn7x4" + schemaVersion3ID := "bafkreiahizg44dgnuniim3y75ztjtj67kkezkit7w445lfpirx6iq6ixg4" + schemaVersion4ID := "bafkreigtg424aidykeyhty44b7b6arhsaewxcg6kfcw37jxigfwskxgf2e" + + test := testUtils.TestCase{ + Description: "Test schema update, with branching schema toggling between branches then patch", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.SchemaPatch{ + // The second schema version will not be set as the active version, leaving the initial version active + SetAsDefaultVersion: immutable.Some(false), + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } + ] + `, + }, + testUtils.SchemaPatch{ + // The third schema version will be set as the active version, going from version 1 to 3 + SetAsDefaultVersion: immutable.Some(true), + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "phone", "Kind": 11} } + ] + `, + }, + testUtils.SetActiveSchemaVersion{ + // Set the second schema version to be active + 
SchemaVersionID: schemaVersion2ID, + }, + testUtils.SchemaPatch{ + // The fourth schema version will be set as the active version, going from version 2 to 4 + SetAsDefaultVersion: immutable.Some(true), + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "discordName", "Kind": 11} } + ] + `, + }, + testUtils.Request{ + // The email and discordName fields are queriable + Request: `query { + Users { + name + email + discordName + } + }`, + Results: []map[string]any{}, + }, + testUtils.GetSchema{ + // The fourth schema version is present in the system, with the email and discordName field + VersionID: immutable.Some(schemaVersion4ID), + ExpectedResults: []client.SchemaDescription{ + { + Name: "Users", + VersionID: schemaVersion4ID, + Root: schemaVersion1ID, + Fields: []client.FieldDescription{ + { + Name: "_docID", + Kind: client.FieldKind_DocID, + Typ: client.LWW_REGISTER, + }, + { + Name: "name", + ID: 1, + Kind: client.FieldKind_NILLABLE_STRING, + Typ: client.LWW_REGISTER, + }, + { + Name: "email", + ID: 2, + Kind: client.FieldKind_NILLABLE_STRING, + Typ: client.LWW_REGISTER, + }, + { + Name: "discordName", + ID: 3, + Kind: client.FieldKind_NILLABLE_STRING, + Typ: client.LWW_REGISTER, + }, + }, + }, + }, + }, + testUtils.GetCollections{ + GetInactive: true, + ExpectedResults: []client.CollectionDescription{ + { + // The original collection version is present, it has no source and is inactive (has no name). + ID: 1, + SchemaVersionID: schemaVersion1ID, + }, + { + // The collection version for schema version 2 is present, it has the first collection as a source + // and is inactive. + ID: 2, + SchemaVersionID: schemaVersion2ID, + Sources: []any{ + &client.CollectionSource{ + SourceCollectionID: 1, + }, + }, + }, + { + // The collection version for schema version 3 is present and inactive, it has the first collection + // as source. 
+ ID: 3, + SchemaVersionID: schemaVersion3ID, + Sources: []any{ + &client.CollectionSource{ + SourceCollectionID: 1, + }, + }, + }, + { + // The collection version for schema version 4 is present and is active, it also has the second collection + // as source. + ID: 4, + Name: immutable.Some("Users"), + SchemaVersionID: schemaVersion4ID, + Sources: []any{ + &client.CollectionSource{ + SourceCollectionID: 2, + }, + }, + }, + }, + }, + }, + } + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/schema/with_update_set_default_test.go b/tests/integration/schema/with_update_set_default_test.go index d768830a5e..524e056755 100644 --- a/tests/integration/schema/with_update_set_default_test.go +++ b/tests/integration/schema/with_update_set_default_test.go @@ -36,7 +36,7 @@ func TestSchema_WithUpdateAndSetDefaultVersionToEmptyString_Errors(t *testing.T) ] `, }, - testUtils.SetDefaultSchemaVersion{ + testUtils.SetActiveSchemaVersion{ SchemaVersionID: "", ExpectedError: "schema version ID can't be empty", }, @@ -63,7 +63,7 @@ func TestSchema_WithUpdateAndSetDefaultVersionToUnknownVersion_Errors(t *testing ] `, }, - testUtils.SetDefaultSchemaVersion{ + testUtils.SetActiveSchemaVersion{ SchemaVersionID: "does not exist", ExpectedError: "datastore: key not found", }, @@ -91,7 +91,7 @@ func TestSchema_WithUpdateAndSetDefaultVersionToOriginal_NewFieldIsNotQueriable( `, SetAsDefaultVersion: immutable.Some(false), }, - testUtils.SetDefaultSchemaVersion{ + testUtils.SetActiveSchemaVersion{ SchemaVersionID: "bafkreibjb4h5nudsei7cq2kkontjinmjpbqls2tmowqp5nxougu4tuus4i", }, testUtils.Request{ @@ -128,7 +128,7 @@ func TestSchema_WithUpdateAndSetDefaultVersionToNew_AllowsQueryingOfNewField(t * `, SetAsDefaultVersion: immutable.Some(false), }, - testUtils.SetDefaultSchemaVersion{ + testUtils.SetActiveSchemaVersion{ SchemaVersionID: "bafkreibzozorw6lqjn5bjogsqxeqcswoqedcatdvphhts4frd7mb4jn7x4", }, testUtils.Request{ diff --git a/tests/integration/test_case.go 
b/tests/integration/test_case.go index 8c13cdcdae..5fea4d4478 100644 --- a/tests/integration/test_case.go +++ b/tests/integration/test_case.go @@ -13,6 +13,7 @@ package tests import ( "testing" + "github.com/lens-vm/lens/host-go/config/model" "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" @@ -90,7 +91,10 @@ type SchemaPatch struct { // If SetAsDefaultVersion has a value, and that value is false then the schema version // resulting from this patch will not be made default. SetAsDefaultVersion immutable.Option[bool] - ExpectedError string + + Lens immutable.Option[model.Lens] + + ExpectedError string } // GetSchema is an action that fetches schema using the provided options. @@ -146,9 +150,9 @@ type GetCollections struct { ExpectedError string } -// SetDefaultSchemaVersion is an action that will set the default schema version to the +// SetActiveSchemaVersion is an action that will set the active schema version to the // given value. -type SetDefaultSchemaVersion struct { +type SetActiveSchemaVersion struct { // NodeID may hold the ID (index) of a node to set the default schema version on. // // If a value is not provided the default will be set on all nodes. 
diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go index 30095c35be..e90a24dd59 100644 --- a/tests/integration/utils2.go +++ b/tests/integration/utils2.go @@ -265,8 +265,8 @@ func performAction( case GetCollections: getCollections(s, action) - case SetDefaultSchemaVersion: - setDefaultSchemaVersion(s, action) + case SetActiveSchemaVersion: + setActiveSchemaVersion(s, action) case CreateView: createView(s, action) @@ -274,9 +274,6 @@ func performAction( case ConfigureMigration: configureMigration(s, action) - case GetMigrations: - getMigrations(s, action) - case CreateDoc: createDoc(s, action) @@ -745,7 +742,7 @@ func refreshCollections( for nodeID, node := range s.nodes { s.collections[nodeID] = make([]client.Collection, len(s.collectionNames)) - allCollections, err := node.GetAllCollections(s.ctx) + allCollections, err := node.GetAllCollections(s.ctx, false) require.Nil(s.t, err) for i, collectionName := range s.collectionNames { @@ -1010,7 +1007,7 @@ func patchSchema( setAsDefaultVersion = true } - err := node.PatchSchema(s.ctx, action.Patch, setAsDefaultVersion) + err := node.PatchSchema(s.ctx, action.Patch, action.Lens, setAsDefaultVersion) expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError) assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised) @@ -1056,7 +1053,7 @@ func getCollections( ) { for _, node := range getNodes(action.NodeID, s.nodes) { db := getStore(s, node, action.TransactionID, "") - results, err := db.GetAllCollections(s.ctx) + results, err := db.GetAllCollections(s.ctx, action.GetInactive) expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError) assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised) @@ -1069,6 +1066,9 @@ func getCollections( if expected.ID != 0 { require.Equal(s.t, expected.ID, actual.ID) } + if expected.RootID != 0 { + require.Equal(s.t, 
expected.RootID, actual.RootID) + } if expected.SchemaVersionID != "" { require.Equal(s.t, expected.SchemaVersionID, actual.SchemaVersionID) } @@ -1091,12 +1091,12 @@ func getCollections( } } -func setDefaultSchemaVersion( +func setActiveSchemaVersion( s *state, - action SetDefaultSchemaVersion, + action SetActiveSchemaVersion, ) { for _, node := range getNodes(action.NodeID, s.nodes) { - err := node.SetDefaultSchemaVersion(s.ctx, action.SchemaVersionID) + err := node.SetActiveSchemaVersion(s.ctx, action.SchemaVersionID) expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError) assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised)