diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 6dd1a0ec00..bf8332107a 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -93,7 +93,7 @@ jobs:
if: matrix.os == 'macos-latest'
uses: actions/cache/save@v4
with:
- path: dist/darwin_amd64
+ path: dist/darwin_arm64
key: darwin-${{ env.sha_short }}
- name: Save cache on Windows
@@ -146,7 +146,7 @@ jobs:
id: restore-macos
uses: actions/cache/restore@v4
with:
- path: dist/darwin_amd64
+ path: dist/darwin_arm64
key: darwin-${{ env.sha_short }}
fail-on-cache-miss: true
diff --git a/.github/workflows/test-and-upload-coverage.yml b/.github/workflows/test-and-upload-coverage.yml
index 60858b1f86..491b674906 100644
--- a/.github/workflows/test-and-upload-coverage.yml
+++ b/.github/workflows/test-and-upload-coverage.yml
@@ -46,11 +46,13 @@ jobs:
database-type: badger-memory
mutation-type: collection-save
detect-changes: false
- - os: windows-latest
- client-type: go
- database-type: badger-memory
- mutation-type: collection-save
- detect-changes: false
+## TODO: https://github.com/sourcenetwork/defradb/issues/2080
+## Uncomment the lines below to re-enable the Windows build once this TODO is resolved.
+## - os: windows-latest
+## client-type: go
+## database-type: badger-memory
+## mutation-type: collection-save
+## detect-changes: false
runs-on: ${{ matrix.os }}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 342cfb3a53..7345a58cc8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,70 @@
+
+
+## [v0.11.0](https://github.com/sourcenetwork/defradb/compare/v0.10.0...v0.11.0)
+
+> 2024-05-03
+
+DefraDB v0.11 is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor version number will denote notable releases, which gives the project freedom to experiment and explore potentially breaking changes.
+
+To get a full outline of the changes, we invite you to review the official changelog below. This release does include a Breaking Change to existing v0.10.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.gg/w7jYQVJ/.
+
+### Features
+
+* Update corelog to 0.0.7 ([#2547](https://github.com/sourcenetwork/defradb/issues/2547))
+* Move relation field properties onto collection ([#2529](https://github.com/sourcenetwork/defradb/issues/2529))
+* Lens runtime config ([#2497](https://github.com/sourcenetwork/defradb/issues/2497))
+* Add P Counter CRDT ([#2482](https://github.com/sourcenetwork/defradb/issues/2482))
+* Add Access Control Policy ([#2338](https://github.com/sourcenetwork/defradb/issues/2338))
+* Force explicit primary decl. in SDL for one-ones ([#2462](https://github.com/sourcenetwork/defradb/issues/2462))
+* Allow mutation of col sources via PatchCollection ([#2424](https://github.com/sourcenetwork/defradb/issues/2424))
+* Add Defra-Lens support for branching schema ([#2421](https://github.com/sourcenetwork/defradb/issues/2421))
+* Add PatchCollection ([#2402](https://github.com/sourcenetwork/defradb/issues/2402))
+
+### Fixes
+
+* Return correct results from one-many indexed filter ([#2579](https://github.com/sourcenetwork/defradb/issues/2579))
+* Handle compound filters on related indexed fields ([#2575](https://github.com/sourcenetwork/defradb/issues/2575))
+* Add check to filter result for logical ops ([#2573](https://github.com/sourcenetwork/defradb/issues/2573))
+* Make all array kinds nillable ([#2534](https://github.com/sourcenetwork/defradb/issues/2534))
+* Allow update when updating non-indexed field ([#2511](https://github.com/sourcenetwork/defradb/issues/2511))
+
+### Documentation
+
+* Add data definition document ([#2544](https://github.com/sourcenetwork/defradb/issues/2544))
+
+### Refactoring
+
+* Merge collection UpdateWith and DeleteWith ([#2531](https://github.com/sourcenetwork/defradb/issues/2531))
+* DB transactions context ([#2513](https://github.com/sourcenetwork/defradb/issues/2513))
+* Add NormalValue ([#2404](https://github.com/sourcenetwork/defradb/issues/2404))
+* Clean up client/request package ([#2443](https://github.com/sourcenetwork/defradb/issues/2443))
+* Rewrite convertImmutable ([#2445](https://github.com/sourcenetwork/defradb/issues/2445))
+* Unify Field Kind and Schema properties ([#2414](https://github.com/sourcenetwork/defradb/issues/2414))
+* Replace logging package with corelog ([#2406](https://github.com/sourcenetwork/defradb/issues/2406))
+
+### Testing
+
+* Add flag to skip network tests ([#2495](https://github.com/sourcenetwork/defradb/issues/2495))
+
+### Bot
+
+* Update dependencies (bulk dependabot PRs) 30-04-2024 ([#2570](https://github.com/sourcenetwork/defradb/issues/2570))
+* Bump [@typescript](https://github.com/typescript)-eslint/parser from 7.7.0 to 7.7.1 in /playground ([#2550](https://github.com/sourcenetwork/defradb/issues/2550))
+* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 7.7.0 to 7.7.1 in /playground ([#2551](https://github.com/sourcenetwork/defradb/issues/2551))
+* Bump swagger-ui-react from 5.16.2 to 5.17.0 in /playground ([#2549](https://github.com/sourcenetwork/defradb/issues/2549))
+* Update dependencies (bulk dependabot PRs) 23-04-2023 ([#2548](https://github.com/sourcenetwork/defradb/issues/2548))
+* Bump go.opentelemetry.io/otel/sdk/metric from 1.24.0 to 1.25.0 ([#2499](https://github.com/sourcenetwork/defradb/issues/2499))
+* Bump typescript from 5.4.3 to 5.4.5 in /playground ([#2515](https://github.com/sourcenetwork/defradb/issues/2515))
+* Bump swagger-ui-react from 5.14.0 to 5.15.0 in /playground ([#2514](https://github.com/sourcenetwork/defradb/issues/2514))
+* Update dependencies (bulk dependabot PRs) 2024-04-09 ([#2509](https://github.com/sourcenetwork/defradb/issues/2509))
+* Update dependencies (bulk dependabot PRs) 2024-04-03 ([#2492](https://github.com/sourcenetwork/defradb/issues/2492))
+* Update dependencies (bulk dependabot PRs) 03-04-2024 ([#2486](https://github.com/sourcenetwork/defradb/issues/2486))
+* Bump github.com/multiformats/go-multiaddr from 0.12.2 to 0.12.3 ([#2480](https://github.com/sourcenetwork/defradb/issues/2480))
+* Bump [@types](https://github.com/types)/react from 18.2.66 to 18.2.67 in /playground ([#2427](https://github.com/sourcenetwork/defradb/issues/2427))
+* Bump [@typescript](https://github.com/typescript)-eslint/parser from 7.2.0 to 7.3.1 in /playground ([#2428](https://github.com/sourcenetwork/defradb/issues/2428))
+* Update dependencies (bulk dependabot PRs) 19-03-2024 ([#2426](https://github.com/sourcenetwork/defradb/issues/2426))
+* Update dependencies (bulk dependabot PRs) 03-11-2024 ([#2399](https://github.com/sourcenetwork/defradb/issues/2399))
+
## [v0.10.0](https://github.com/sourcenetwork/defradb/compare/v0.9.0...v0.10.0)
diff --git a/Makefile b/Makefile
index cde535be4b..658b514a4b 100644
--- a/Makefile
+++ b/Makefile
@@ -202,7 +202,7 @@ verify:
.PHONY: tidy
tidy:
- go mod tidy -go=1.21
+ go mod tidy -go=1.21.3
.PHONY: clean
clean:
diff --git a/README.md b/README.md
index a7156888b9..220c48f842 100644
--- a/README.md
+++ b/README.md
@@ -17,7 +17,6 @@ Read the documentation on [docs.source.network](https://docs.source.network/).
## Table of Contents
-- [Early Access](#early-access)
- [Install](#install)
- [Start](#start)
- [Configuration](#configuration)
@@ -32,12 +31,14 @@ Read the documentation on [docs.source.network](https://docs.source.network/).
- [Collection subscription example](#collection-subscription-example)
- [Replicator example](#replicator-example)
- [Securing the HTTP API with TLS](#securing-the-http-api-with-tls)
+- [Access Control System](#access-control-system)
- [Supporting CORS](#supporting-cors)
- [Backing up and restoring](#backing-up-and-restoring)
+- [Community](#community)
- [Licensing](#licensing)
- [Contributors](#contributors)
-DISCLAIMER: At this early stage, DefraDB does not offer access control or data encryption, and the default configuration exposes the database to the network. The software is provided "as is" and is not guaranteed to be stable, secure, or error-free. We encourage you to experiment with DefraDB and provide feedback, but please do not use it for production purposes until it has been thoroughly tested and developed.
+DISCLAIMER: At this early stage, DefraDB does not offer data encryption, and the default configuration exposes the database to the network. The software is provided "as is" and is not guaranteed to be stable, secure, or error-free. We encourage you to experiment with DefraDB and provide feedback, but please do not use it for production purposes until it has been thoroughly tested and developed.
## Install
@@ -397,6 +398,9 @@ defradb start --tls --pubkeypath ~/path-to-pubkey.key --privkeypath ~/path-to-pr
```
+## Access Control System
+Read more about the access control system [here](./acp/README.md).
+
## Supporting CORS
When accessing DefraDB through a frontend interface, you may be confronted with a CORS error. That is because, by default, DefraDB will not have any allowed origins set. To specify which origins should be allowed to access your DefraDB endpoint, you can specify them when starting the database:
diff --git a/acp/README.md b/acp/README.md
new file mode 100644
index 0000000000..697a60a0c2
--- /dev/null
+++ b/acp/README.md
@@ -0,0 +1,442 @@
+# Introduction
+
+In the realm of information technology (IT) and cybersecurity, **access control** plays a pivotal role in ensuring the confidentiality, integrity, and availability of sensitive resources. Let's delve into why access control policies are crucial for protecting your valuable data.
+
+## What Is Access Control?
+
+**Access control** is a mechanism that regulates who or what can view, use, or access a specific resource within a computing environment. Its primary goal is to minimize security risks by ensuring that only **authorized users**, systems, or services have access to the resources they need. But it's more than just granting or denying access; it involves several key components:
+
+1. **Authentication**: Verifying the identity of an individual or system.
+2. **Authorization**: Determining what actions or operations an actor is allowed to perform.
+3. **Access**: Granting or denying access based on authorization.
+4. **Management**: Administering access rights and permissions.
+5. **Audit**: Tracking and monitoring access patterns for accountability.
+
+## Why Is Access Control Important?
+
+1. **Mitigating Security Risks**: Cybercriminals are becoming increasingly sophisticated, employing advanced techniques to breach security systems. By controlling who has access to your database, you significantly reduce the risk of unauthorized access, both from external attackers and insider threats.
+
+2. **Compliance with Regulations**: Various regulatory requirements, such as the **General Data Protection Regulation (GDPR)** and the **Health Insurance Portability and Accountability Act (HIPAA)**, mandate stringent access control measures to protect personal data. Implementing access control ensures compliance with these regulations.
+
+3. **Preventing Data Breaches**: Access control acts as a proactive measure to deter, detect, and prevent unauthorized access. It ensures that only those with the necessary permissions can access sensitive data or services.
+
+4. **Managing Complexity**: Modern IT infrastructure, including cloud computing and mobile devices, has exponentially increased the number of access points. Technologies like **identity and access management (IAM)** and approaches like **zero trust** help manage this complexity effectively.
+
+## Types of Security Access Controls
+
+Several access control models exist, including:
+
+- **Role-Based Access Control (RBAC)**: Assigns permissions to roles, which are then granted to users; a user's active role defines their access (e.g., admin, user, manager).
+- **Attribute-Based Access Control (ABAC)**: Considers various attributes (e.g., user attributes, resource attributes) for access decisions.
+- **Discretionary Access Control (DAC)**: Users with sufficient permissions (resource owners) are able to grant or share an object with other users.
+- **Mandatory Access Control (MAC)**: Users are not allowed to grant access to other users. Permissions are granted based on a minimum role / hierarchy (security labels and clearances) that must be met.
+- **Policy-Based Access Control (PBAC)**: Enforces access based on defined policies.
+- **Relation-Based Access Control (ReBAC)**: Relations between objects and users in the system are used to derive their permissions.
+
+- Note: **DefraDB**'s access control rules strongly resemble **Discretionary Access Control (DAC)**, implemented through a **Relation-Based Access Control (ReBAC)** engine.
+
+## Challenges of Access Control in Cybersecurity
+
+- **Distributed IT Environments**: Cloud computing and remote work create new challenges.
+- **Rise of Mobility**: Mobile devices in the workplace add complexity.
+- **Password Fatigue**: Balancing security with usability.
+- **Data Governance**: Ensuring visibility and control.
+- **Multi-Tenancy**: Managing complex permissions in SaaS applications.
+
+## Key Takeaway
+A robust access control policy system is your first line of defense against unauthorized access and data breaches.
+
+
+# DefraDB's Access Control System
+
+## ReBAC Authorization Model
+
+### Zanzibar
+In 2019, Google published the [Zanzibar](https://research.google/pubs/zanzibar-googles-consistent-global-authorization-system/) paper, explaining how they handle authorization across their many services. It uses access control lists, but with relationship-based rather than role-based access control. Relationship-Based Access Control (ReBAC) establishes an authorization model where a subject's permission to access an object is defined by the presence of relationships between those subjects and objects.
+Zanzibar exposes an API with (mainly) operations to manage `Relationships` (`tuples`) and to verify access requests (can Bob do X?) through the `Check` call. A `tuple` includes a subject, a relation, and an object. The `Check` call performs a graph search over the `tuples` to find a path between the user and the object; if such a path exists, then according to ReBAC the user has the queried permission. Zanzibar operates as a consistent and partition-tolerant (CP) system.
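+
+For intuition, here is a schematic set of tuples and a `Check` call. The notation loosely follows the paper's `object#relation@user` form and is only illustrative, not exact Zanzibar syntax:
+
+```text
+doc:readme#owner@user:alice        (alice is the owner of doc:readme)
+doc:readme#reader@user:bob         (bob is a reader of doc:readme)
+
+Check(user:bob, read, doc:readme)  -> allowed, via the reader tuple
+```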
+
+### Zanzi
+However, the Zanzibar API is centralized, so we (Source Network) created a decentralized implementation of Zanzibar called **Zanzi**, powered by our SourceHub trust protocol. Zanzi is a general-purpose Zanzibar implementation that operates over a KV persistence layer.
+
+### SourceHub ACP Module
+DefraDB wraps the `local` and `remote` SourceHub ACP modules to bring all that magic to the database.
+
+In order to set up relation-based access control, SourceHub requires an agreed-upon contract that models the `relations`, `permissions`, and `actors`. That contract is referred to as a `SourceHub Policy`. The policy models all the `relations` and `permissions` under a `resource`.
+A `resource` corresponds to the "thing" we want to gate access control around. This can be a `Type`, `Container`, `Schema`, `Shape`, or anything that has Objects needing access control. Once the policy is finalized, it has to be uploaded to the `SourceHub Module` so it can be used.
+Once the `Policy` is uploaded to the `SourceHub Module`, an `Actor` can begin registering `Objects` for access control by linking them to a `Resource` that exists on the uploaded `Policy`.
+After an `Object` is registered successfully, the `Actor` gets a special built-in relation with that `Object` called the `"owner"` relation. This relation is given to the `Registerer` of an `Object`.
+An `Actor` can then issue `Check` calls to see if they have access to an `Object`.
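+
+The `ACP` interface added in this change (`acp/acp.go`) mirrors this lifecycle. A minimal sketch in Go (error handling elided; `a` is any `acp.ACP` implementation, and `ctx`, `creatorID`, `policyYAML`, and `docID` are assumed to be in scope):
+
+```go
+policyID, _ := a.AddPolicy(ctx, creatorID, policyYAML)            // 1. upload the Policy
+_ = a.RegisterDocObject(ctx, creatorID, policyID, "users", docID) // 2. register an Object; the creator becomes "owner"
+ok, _ := a.CheckDocAccess(ctx, acp.ReadPermission, creatorID, policyID, "users", docID) // 3. Check call
+```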
+
+## Document Access Control (DAC)
+In DefraDB's case, we wanted to gate access control around the `Documents` that belong to a specific `Collection`. Here, the `Collection` (i.e. the type/shape of the `Object`) can be thought of as the `Resource`, and the `Documents` are the `Objects`.
+
+
+## Field Access Control (FAC) (coming soon)
+We also want the ability to do more granular access control than just DAC. Therefore we will have `Field`-level access control for situations where some fields of a `Document` need to be private while others do not. In this case the `Document` becomes the `Resource` and the `Fields` are the `Objects` being gated.
+
+
+## Admin Access Control (AAC) (coming soon)
+We also want to model access control around the `Admin Level Operations` that exist in `DefraDB`. In this case the entire `Database` would be the `Resource` and the `Admin Level Operations` are the `Objects` being gated.
+
+A non-exhaustive list of operations only admins should have access to:
+- Ability to turn off ACP
+- Ability to interact with the P2P system
+
+## SourceHub Policies Are Too Flexible
+SourceHub Policies are too flexible (at least until the ability to define `Meta Policies` is implemented), because SourceHub leaves it up to the user to specify any type of `Permissions` and `Relations`. However, for DefraDB there are certain guarantees that **MUST** be maintained for a `Policy` to be effective. For example, a user can give a `Permission` or `Relation` any name, including one DefraDB has no knowledge of; or a user might write a `Policy` that grants no `Permission` to the `owner`, meaning that in the case of DAC no one would have any access to the `Document` they created.
+Therefore, there was a clear need to define some rules for writing a `Resource` in a `Policy` that will be used with DefraDB's DAC, FAC, or AAC. These rules guarantee that certain `Required Permissions` will always be present on a `Resource` and that the `Owner` has the correct `Permissions`.
+
+We call these rules the DPI, a.k.a. the DefraDB Policy Interface.
+
+## Terminologies
+- 'SourceHub Address' is a `Bech32` Address with a specific SourceHub prefix.
+- 'Identity' is a combination of SourceHub Address and a Key-Pair Signature.
+- 'DPI' means 'DefraDB Policy Interface'.
+- 'Partially-DPI' policy means a policy with at least one DPI-compliant resource.
+- 'Permissioned Collection' means a collection with a policy on it, like: `@policy(id:".." resource: "..")`
+- 'Permissioned Request' means a request made with a SourceHub Identity.
+
+
+## DAC DPI Rules
+
+To qualify as a DPI-compliant `resource`, the following rules **MUST** be satisfied:
+- The resource **must include** the mandatory `registerer` (`owner`) relation within the `relations` attribute.
+- The resource **must encompass** all the required permissions under the `permissions` attribute.
+- Every required permission must have the required registerer relation (`owner`) in `expr`.
+- The required registerer relation **must be positioned** as the leading (first) relation in `expr` (see example below).
+- Any relation following the required registerer relation may only be joined with the union set operation (`+`).
+
+For a `Policy` to be DPI compliant for DAC, all of its `resources` must be DPI compliant.
+To be `Partially-DPI`, at least one of its `resources` must be DPI compliant.
+
+### More Into The Weeds:
+
+All mandatory permissions are:
+- Specified in the `dpi.go` file within the variable `dpiRequiredPermissions`.
+
+The name of the required 'registerer' relation is:
+- Specified in the `dpi.go` file within the variable `requiredRegistererRelationName`.
+
+### DPI Resource Examples:
+- Check out tests here: [tests/integration/acp/schema/add_dpi](/tests/integration/acp/schema/add_dpi)
+- The tests linked are broken into `accept_*_test.go` and `reject_*_test.go` files.
+- Accepted tests document the valid DPIs (as the schema is accepted).
+- Rejected tests document invalid DPIs (as the schema is rejected).
+- There are also some Partially-DPI tests that are both accepted and rejected depending on the resource.
+
+### Required Permission's Expression:
+Even though the following expressions are valid generic policy expressions, they will make a
+DPI-compliant resource lose its DPI status, as these expressions are not in accordance with
+our DPI [rules](#dac-dpi-rules). Assume these `expr` are under a required permission label:
+- `expr: owner-owner`
+- `expr: owner-reader`
+- `expr: owner&reader`
+- `expr: owner - reader`
+- `expr: ownerMalicious + owner`
+- `expr: ownerMalicious`
+- `expr: owner_new`
+- `expr: reader+owner`
+- `expr: reader-owner`
+- `expr: reader - owner`
+
+Here are some valid expression examples, assuming these `expr` are under a required permission label:
+- `expr: owner`
+- `expr: owner + reader`
+- `expr: owner +reader`
+- `expr: owner+reader`
+
+
+## DAC Usage CLI:
+
+### Adding a Policy:
+
+We have in `examples/dpi_policy/user_dpi_policy.yml`:
+```yaml
+description: A Valid DefraDB Policy Interface (DPI)
+
+actor:
+ name: actor
+
+resources:
+ users:
+ permissions:
+ read:
+ expr: owner + reader
+ write:
+ expr: owner
+
+ relations:
+ owner:
+ types:
+ - actor
+ reader:
+ types:
+ - actor
+```
+
+CLI Command:
+```sh
+defradb client acp policy add -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j -f examples/dpi_policy/user_dpi_policy.yml
+```
+
+Result:
+```json
+{
+ "PolicyID": "24ab8cba6d6f0bcfe4d2712c7d95c09dd1b8076ea5a8896476413fd6c891c18c"
+}
+```
+
+### Add schema, linking to a resource within the policy we added:
+
+We have in `examples/schema/permissioned/users.graphql`:
+```graphql
+type Users @policy(
+ id: "24ab8cba6d6f0bcfe4d2712c7d95c09dd1b8076ea5a8896476413fd6c891c18c",
+ resource: "users"
+) {
+ name: String
+ age: Int
+}
+```
+
+CLI Command:
+```sh
+defradb client schema add -f examples/schema/permissioned/users.graphql
+```
+
+Result:
+```json
+[
+ {
+ "Name": "Users",
+ "ID": 1,
+ "RootID": 1,
+ "SchemaVersionID": "bafkreihhd6bqrjhl5zidwztgxzeseveplv3cj3fwtn3unjkdx7j2vr2vrq",
+ "Sources": [],
+ "Fields": [
+ {
+ "Name": "_docID",
+ "ID": 0
+ },
+ {
+ "Name": "age",
+ "ID": 1
+ },
+ {
+ "Name": "name",
+ "ID": 2
+ }
+ ],
+ "Indexes": [],
+ "Policy": {
+ "ID": "24ab8cba6d6f0bcfe4d2712c7d95c09dd1b8076ea5a8896476413fd6c891c18c",
+ "ResourceName": "users"
+ }
+ }
+]
+```
+
+### Create private documents (with identity)
+
+CLI Command:
+```sh
+defradb client collection create -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name Users '[{ "name": "SecretShahzad" }, { "name": "SecretLone" }]'
+```
+
+### Create public documents (without identity)
+
+CLI Command:
+```sh
+defradb client collection create --name Users '[{ "name": "PublicShahzad" }, { "name": "PublicLone" }]'
+```
+
+### Get all docIDs without an identity (shows only public):
+CLI Command:
+```sh
+defradb client collection docIDs
+```
+
+Result:
+```json
+{
+ "docID": "bae-63ba68c9-78cb-5060-ab03-53ead1ec5b83",
+ "error": ""
+}
+{
+ "docID": "bae-ba315e98-fb37-5225-8a3b-34a1c75cba9e",
+ "error": ""
+}
+```
+
+
+### Get all docIDs with an identity (shows public and owned documents):
+```sh
+defradb client collection docIDs -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j
+```
+
+Result:
+```json
+{
+ "docID": "bae-63ba68c9-78cb-5060-ab03-53ead1ec5b83",
+ "error": ""
+}
+{
+ "docID": "bae-a5830219-b8e7-5791-9836-2e494816fc0a",
+ "error": ""
+}
+{
+ "docID": "bae-ba315e98-fb37-5225-8a3b-34a1c75cba9e",
+ "error": ""
+}
+{
+ "docID": "bae-eafad571-e40c-55a7-bc41-3cf7d61ee891",
+ "error": ""
+}
+```
+
+
+### Access the private document (including field names):
+CLI Command:
+```sh
+defradb client collection get -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name Users "bae-a5830219-b8e7-5791-9836-2e494816fc0a"
+```
+
+Result:
+```json
+{
+ "_docID": "bae-a5830219-b8e7-5791-9836-2e494816fc0a",
+ "name": "SecretShahzad"
+}
+```
+
+### Access the private document without an identity:
+CLI Command:
+```sh
+defradb client collection get --name Users "bae-a5830219-b8e7-5791-9836-2e494816fc0a"
+```
+
+Error:
+```
+ Error: document not found or not authorized to access
+```
+
+### Access the private document with the wrong identity:
+CLI Command:
+```sh
+defradb client collection get -i cosmos1x25hhksxhu86r45hqwk28dd70qzux3262hdrll --name Users "bae-a5830219-b8e7-5791-9836-2e494816fc0a"
+```
+
+Error:
+```
+ Error: document not found or not authorized to access
+```
+
+### Update private document:
+CLI Command:
+```sh
+defradb client collection update -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name Users --docID "bae-a5830219-b8e7-5791-9836-2e494816fc0a" --updater '{ "name": "SecretUpdatedShahzad" }'
+```
+
+Result:
+```json
+{
+ "Count": 1,
+ "DocIDs": [
+ "bae-a5830219-b8e7-5791-9836-2e494816fc0a"
+ ]
+}
+```
+
+#### Check if it actually got updated:
+CLI Command:
+```sh
+defradb client collection get -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name Users "bae-a5830219-b8e7-5791-9836-2e494816fc0a"
+```
+
+Result:
+```json
+{
+ "_docID": "bae-a5830219-b8e7-5791-9836-2e494816fc0a",
+ "name": "SecretUpdatedShahzad"
+}
+```
+
+### Update With Filter example (coming soon)
+
+### Delete private document:
+CLI Command:
+```sh
+defradb client collection delete -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name Users --docID "bae-a5830219-b8e7-5791-9836-2e494816fc0a"
+```
+
+Result:
+```json
+{
+ "Count": 1,
+ "DocIDs": [
+ "bae-a5830219-b8e7-5791-9836-2e494816fc0a"
+ ]
+}
+```
+
+#### Check if it actually got deleted:
+CLI Command:
+```sh
+defradb client collection get -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name Users "bae-a5830219-b8e7-5791-9836-2e494816fc0a"
+```
+
+Error:
+```
+ Error: document not found or not authorized to access
+```
+
+### Delete With Filter example (coming soon)
+
+### Typejoin example (coming soon)
+
+### View example (coming soon)
+
+### P2P example (coming soon)
+
+### Backup / Import example (coming soon)
+
+### Secondary Indexes example (coming soon)
+
+### Execute Explain example (coming soon)
+
+
+## DAC Usage HTTP:
+HTTP requests work similarly to their CLI counterparts; the main difference is that the identity is specified within the Authorization header, like so: `Authorization: Basic <identity>`.
+
+Note: The `Basic` scheme will change to `Bearer` once JWS Authentication Tokens are supported.
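+
+For example, a GraphQL request made with an identity might look like the following. This is only a sketch: the address and `/api/v0/graphql` path assume a default local node.
+
+```sh
+curl -X POST http://localhost:9181/api/v0/graphql \
+  -H "Authorization: Basic cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j" \
+  --data '{"query": "{ Users { name } }"}'
+```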
+
+## _AAC DPI Rules (coming soon)_
+## _AAC Usage: (coming soon)_
+
+## _FAC DPI Rules (coming soon)_
+## _FAC Usage: (coming soon)_
+
+## Warning / Caveats
+The following features currently don't work with ACP; they are being actively worked on.
+- [P2P: Adding a replicator with permissioned collection](https://github.com/sourcenetwork/defradb/issues/2366)
+- [P2P: Subscription to a permissioned collection](https://github.com/sourcenetwork/defradb/issues/2366)
+- [Adding Secondary Indexes](https://github.com/sourcenetwork/defradb/issues/2365)
+- [Backing/Restoring Private Documents](https://github.com/sourcenetwork/defradb/issues/2430)
+
+The following features may have undefined/unstable behavior until they are properly tested:
+- [Views](https://github.com/sourcenetwork/defradb/issues/2018)
+- [Average Operations](https://github.com/sourcenetwork/defradb/issues/2475)
+- [Count Operations](https://github.com/sourcenetwork/defradb/issues/2474)
+- [Group Operations](https://github.com/sourcenetwork/defradb/issues/2473)
+- [Limit Operations](https://github.com/sourcenetwork/defradb/issues/2472)
+- [Order Operations](https://github.com/sourcenetwork/defradb/issues/2471)
+- [Sum Operations](https://github.com/sourcenetwork/defradb/issues/2470)
+- [Dag/Commit Operations](https://github.com/sourcenetwork/defradb/issues/2469)
+- [Delete With Filter Operations](https://github.com/sourcenetwork/defradb/issues/2468)
+- [Update With Filter Operations](https://github.com/sourcenetwork/defradb/issues/2467)
+- [Type Join Many Operations](https://github.com/sourcenetwork/defradb/issues/2466)
+- [Type Join One Operations](https://github.com/sourcenetwork/defradb/issues/2466)
+- [Parallel Operations](https://github.com/sourcenetwork/defradb/issues/2465)
+- [Execute Explain](https://github.com/sourcenetwork/defradb/issues/2464)
diff --git a/acp/acp.go b/acp/acp.go
new file mode 100644
index 0000000000..af99bcb86f
--- /dev/null
+++ b/acp/acp.go
@@ -0,0 +1,100 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package acp
+
+import (
+ "context"
+
+ "github.com/sourcenetwork/immutable"
+
+ "github.com/sourcenetwork/corelog"
+)
+
+var (
+ log = corelog.NewLogger("acp")
+
+ // NoACP is an empty ACP, used to disable access control.
+ NoACP = immutable.None[ACP]()
+)
+
+// ACP is the interface to all types of access control that might exist.
+type ACP interface {
+ // Init initializes the acp with an absolute path. The provided path indicates where the
+ // persistent data will be stored for acp.
+ //
+ // If the path is empty then acp will run in memory.
+ Init(ctx context.Context, path string)
+
+ // Start starts the acp, using the initialized path. Will recover acp state
+ // from a previous run if under the same path.
+ //
+ // If the path is empty then acp will run in memory.
+ Start(ctx context.Context) error
+
+ // Close closes the resources in use by acp.
+ Close() error
+
+ // AddPolicy attempts to add the given policy. The format of the policy is detected
+ // automatically, assuming YAML if JSON validation fails. Upon success a policyID is
+ // returned, otherwise an error is returned.
+ //
+ // A policy cannot be added without a creator identity (sourcehub address).
+ AddPolicy(ctx context.Context, creatorID string, policy string) (string, error)
+
+ // ValidateResourceExistsOnValidDPI performs DPI validation of the resource (matching resource name)
+ // that is on the policy (matching policyID), and returns an error upon validation failure.
+ //
+ // Learn more about the DefraDB Policy Interface [DPI](/acp/README.md)
+ ValidateResourceExistsOnValidDPI(
+ ctx context.Context,
+ policyID string,
+ resourceName string,
+ ) error
+
+ // RegisterDocObject registers the document (object) to have access control.
+ // No error is returned upon successful registering of a document.
+ //
+ // Note(s):
+ // - This function does not check the collection to see if the document actually exists.
+ // - Some documents might be created without an identity signature so they would have public access.
+ // - actorID here is the identity of the actor registering the document object.
+ RegisterDocObject(
+ ctx context.Context,
+ actorID string,
+ policyID string,
+ resourceName string,
+ docID string,
+ ) error
+
+ // IsDocRegistered returns true if the document was found to be registered, otherwise returns false.
+ // If the check failed then an error and false will be returned.
+ IsDocRegistered(
+ ctx context.Context,
+ policyID string,
+ resourceName string,
+ docID string,
+ ) (bool, error)
+
+ // CheckDocAccess returns true if the check was successful and the request has access to the document. If
+ // the check was successful but the request does not have access to the document, then false is returned.
+ // Otherwise, if the check failed, an error is returned (and the boolean result should not be used).
+ //
+ // Note(s):
+ // - permission here is a valid DPI permission we are checking for ("read" or "write").
+ CheckDocAccess(
+ ctx context.Context,
+ permission DPIPermission,
+ actorID string,
+ policyID string,
+ resourceName string,
+ docID string,
+ ) (bool, error)
+}
diff --git a/acp/acp_local.go b/acp/acp_local.go
new file mode 100644
index 0000000000..e569efd5d0
--- /dev/null
+++ b/acp/acp_local.go
@@ -0,0 +1,310 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package acp
+
+import (
+ "context"
+
+ protoTypes "github.com/cosmos/gogoproto/types"
+ "github.com/sourcenetwork/corelog"
+ "github.com/sourcenetwork/immutable"
+ "github.com/sourcenetwork/sourcehub/x/acp/embedded"
+ "github.com/sourcenetwork/sourcehub/x/acp/types"
+ "github.com/valyala/fastjson"
+
+ "github.com/sourcenetwork/defradb/errors"
+)
+
+var (
+ _ ACP = (*ACPLocal)(nil)
+)
+
+// ACPLocal represents a local acp implementation that makes no remote calls.
+type ACPLocal struct {
+ pathToStore immutable.Option[string]
+ localACP *embedded.LocalACP
+}
+
+func (l *ACPLocal) Init(ctx context.Context, path string) {
+ if path == "" {
+ l.pathToStore = immutable.None[string]()
+ } else {
+ l.pathToStore = immutable.Some(path)
+ }
+}
+
+func (l *ACPLocal) Start(ctx context.Context) error {
+ var localACP embedded.LocalACP
+ var err error
+
+ if !l.pathToStore.HasValue() { // Use a non-persistent, i.e. in memory store.
+ localACP, err = embedded.NewLocalACP(
+ embedded.WithInMemStore(),
+ )
+
+ if err != nil {
+ return NewErrInitializationOfACPFailed(err, "Local", "in-memory")
+ }
+ } else { // Use persistent storage.
+ acpStorePath := l.pathToStore.Value() + "/" + embedded.DefaultDataDir
+ localACP, err = embedded.NewLocalACP(
+ embedded.WithPersistentStorage(acpStorePath),
+ )
+ if err != nil {
+ return NewErrInitializationOfACPFailed(err, "Local", l.pathToStore.Value())
+ }
+ }
+
+ l.localACP = &localACP
+ return nil
+}
+
+func (l *ACPLocal) Close() error {
+ return l.localACP.Close()
+}
+
+func (l *ACPLocal) AddPolicy(
+ ctx context.Context,
+ creatorID string,
+ policy string,
+) (string, error) {
+ // Having a creator identity is a MUST requirement for adding a policy.
+ if creatorID == "" {
+ return "", ErrPolicyCreatorMustNotBeEmpty
+ }
+
+ if policy == "" {
+ return "", ErrPolicyDataMustNotBeEmpty
+ }
+
+ // Assume policy is in YAML format by default.
+ policyMarshalType := types.PolicyMarshalingType_SHORT_YAML
+ if isJSON := fastjson.Validate(policy) == nil; isJSON { // Detect JSON format.
+ policyMarshalType = types.PolicyMarshalingType_SHORT_JSON
+ }
+
+ createPolicy := types.MsgCreatePolicy{
+ Creator: creatorID,
+ Policy: policy,
+ MarshalType: policyMarshalType,
+ CreationTime: protoTypes.TimestampNow(),
+ }
+
+ createPolicyResponse, err := l.localACP.GetMsgService().CreatePolicy(
+ l.localACP.GetCtx(),
+ &createPolicy,
+ )
+
+ if err != nil {
+ return "", NewErrFailedToAddPolicyWithACP(err, "Local", creatorID)
+ }
+
+ policyID := createPolicyResponse.Policy.Id
+ log.InfoContext(ctx, "Created Policy", corelog.Any("PolicyID", policyID))
+
+ return policyID, nil
+}
+
+func (l *ACPLocal) ValidateResourceExistsOnValidDPI(
+ ctx context.Context,
+ policyID string,
+ resourceName string,
+) error {
+ if policyID == "" && resourceName == "" {
+ return ErrNoPolicyArgs
+ }
+
+ if policyID == "" {
+ return ErrPolicyIDMustNotBeEmpty
+ }
+
+ if resourceName == "" {
+ return ErrResourceNameMustNotBeEmpty
+ }
+
+ queryPolicyRequest := types.QueryPolicyRequest{Id: policyID}
+ queryPolicyResponse, err := l.localACP.GetQueryService().Policy(
+ l.localACP.GetCtx(),
+ &queryPolicyRequest,
+ )
+
+ if err != nil {
+ if errors.Is(err, types.ErrPolicyNotFound) {
+ return newErrPolicyDoesNotExistWithACP(err, policyID)
+ } else {
+ return newErrPolicyValidationFailedWithACP(err, policyID)
+ }
+ }
+
+ // So far we have validated that the policy exists; now let's validate that the resource exists.
+ resourceResponse := queryPolicyResponse.Policy.GetResourceByName(resourceName)
+ if resourceResponse == nil {
+ return newErrResourceDoesNotExistOnTargetPolicy(resourceName, policyID)
+ }
+
+ // Now that we have validated that policyID exists and it contains a corresponding
+ // resource with the matching name, validate that all required permissions
+ // for DPI actually exist on the target resource.
+ for _, requiredPermission := range dpiRequiredPermissions {
+ permissionResponse := resourceResponse.GetPermissionByName(requiredPermission)
+ if permissionResponse == nil {
+ return newErrResourceIsMissingRequiredPermission(
+ resourceName,
+ requiredPermission,
+ policyID,
+ )
+ }
+
+ // Now we need to ensure that the "owner" relation has access to all the required
+ // permissions for DPI. This is important because even if the policy has the required
+ // permissions under the resource, it's possible that those permissions are not granted
+ // to the "owner" relation; this will help users not shoot themselves in the foot.
+ // TODO-ACP: Better validation, once sourcehub implements meta-policies.
+ // Issue: https://github.com/sourcenetwork/defradb/issues/2359
+ if err := validateDPIExpressionOfRequiredPermission(
+ permissionResponse.Expression,
+ requiredPermission,
+ ); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (l *ACPLocal) RegisterDocObject(
+ ctx context.Context,
+ actorID string,
+ policyID string,
+ resourceName string,
+ docID string,
+) error {
+ registerDoc := types.MsgRegisterObject{
+ Creator: actorID,
+ PolicyId: policyID,
+ Object: types.NewObject(resourceName, docID),
+ CreationTime: protoTypes.TimestampNow(),
+ }
+
+ registerDocResponse, err := l.localACP.GetMsgService().RegisterObject(
+ l.localACP.GetCtx(),
+ &registerDoc,
+ )
+
+ if err != nil {
+ return NewErrFailedToRegisterDocWithACP(err, "Local", policyID, actorID, resourceName, docID)
+ }
+
+ switch registerDocResponse.Result {
+ case types.RegistrationResult_NoOp:
+ return ErrObjectDidNotRegister
+
+ case types.RegistrationResult_Registered:
+ log.InfoContext(
+ ctx,
+ "Document registered with local acp",
+ corelog.Any("PolicyID", policyID),
+ corelog.Any("Creator", actorID),
+ corelog.Any("Resource", resourceName),
+ corelog.Any("DocID", docID),
+ )
+ return nil
+
+ case types.RegistrationResult_Unarchived:
+ log.InfoContext(
+ ctx,
+ "Document re-registered (unarchived object) with local acp",
+ corelog.Any("PolicyID", policyID),
+ corelog.Any("Creator", actorID),
+ corelog.Any("Resource", resourceName),
+ corelog.Any("DocID", docID),
+ )
+ return nil
+ }
+
+ return ErrObjectDidNotRegister
+}
+
+func (l *ACPLocal) IsDocRegistered(
+ ctx context.Context,
+ policyID string,
+ resourceName string,
+ docID string,
+) (bool, error) {
+ queryObjectOwner := types.QueryObjectOwnerRequest{
+ PolicyId: policyID,
+ Object: types.NewObject(resourceName, docID),
+ }
+
+ queryObjectOwnerResponse, err := l.localACP.GetQueryService().ObjectOwner(
+ l.localACP.GetCtx(),
+ &queryObjectOwner,
+ )
+ if err != nil {
+ return false, NewErrFailedToCheckIfDocIsRegisteredWithACP(err, "Local", policyID, resourceName, docID)
+ }
+
+ return queryObjectOwnerResponse.IsRegistered, nil
+}
+
+func (l *ACPLocal) CheckDocAccess(
+ ctx context.Context,
+ permission DPIPermission,
+ actorID string,
+ policyID string,
+ resourceName string,
+ docID string,
+) (bool, error) {
+ checkDoc := types.QueryVerifyAccessRequestRequest{
+ PolicyId: policyID,
+ AccessRequest: &types.AccessRequest{
+ Operations: []*types.Operation{
+ {
+ Object: types.NewObject(resourceName, docID),
+ Permission: permission.String(),
+ },
+ },
+ Actor: &types.Actor{
+ Id: actorID,
+ },
+ },
+ }
+
+ checkDocResponse, err := l.localACP.GetQueryService().VerifyAccessRequest(
+ l.localACP.GetCtx(),
+ &checkDoc,
+ )
+ if err != nil {
+ return false, NewErrFailedToVerifyDocAccessWithACP(err, "Local", policyID, actorID, resourceName, docID)
+ }
+
+ if checkDocResponse.Valid {
+ log.InfoContext(
+ ctx,
+ "Document accessible",
+ corelog.Any("PolicyID", policyID),
+ corelog.Any("ActorID", actorID),
+ corelog.Any("Resource", resourceName),
+ corelog.Any("DocID", docID),
+ )
+ return true, nil
+ } else {
+ log.InfoContext(
+ ctx,
+ "Document inaccessible",
+ corelog.Any("PolicyID", policyID),
+ corelog.Any("ActorID", actorID),
+ corelog.Any("Resource", resourceName),
+ corelog.Any("DocID", docID),
+ )
+ return false, nil
+ }
+}
diff --git a/acp/acp_local_test.go b/acp/acp_local_test.go
new file mode 100644
index 0000000000..9abdcb04d1
--- /dev/null
+++ b/acp/acp_local_test.go
@@ -0,0 +1,654 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package acp
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+var identity1 = "cosmos1zzg43wdrhmmk89z3pmejwete2kkd4a3vn7w969"
+var identity2 = "cosmos1x25hhksxhu86r45hqwk28dd70qzux3262hdrll"
+
+var validPolicyID string = "4f13c5084c3d0e1e5c5db702fceef84c3b6ab948949ca8e27fcaad3fb8bc39f4"
+var validPolicy string = `
+description: a policy
+
+actor:
+ name: actor
+
+resources:
+ users:
+ permissions:
+ write:
+ expr: owner
+ read:
+ expr: owner + reader
+
+ relations:
+ owner:
+ types:
+ - actor
+ reader:
+ types:
+ - actor
+ `
+
+func Test_LocalACP_InMemory_StartAndClose_NoError(t *testing.T) {
+ ctx := context.Background()
+ var localACP ACPLocal
+
+ localACP.Init(ctx, "")
+ err := localACP.Start(ctx)
+
+ require.Nil(t, err)
+
+ err = localACP.Close()
+ require.Nil(t, err)
+}
+
+func Test_LocalACP_PersistentMemory_StartAndClose_NoError(t *testing.T) {
+ acpPath := t.TempDir()
+ require.NotEqual(t, "", acpPath)
+
+ ctx := context.Background()
+ var localACP ACPLocal
+
+ localACP.Init(ctx, acpPath)
+ err := localACP.Start(ctx)
+ require.Nil(t, err)
+
+ err = localACP.Close()
+ require.Nil(t, err)
+}
+
+func Test_LocalACP_InMemory_AddPolicy_CanCreateTwice(t *testing.T) {
+ ctx := context.Background()
+ var localACP ACPLocal
+
+ localACP.Init(ctx, "")
+ errStart := localACP.Start(ctx)
+ require.Nil(t, errStart)
+
+ policyID, errAddPolicy := localACP.AddPolicy(
+ ctx,
+ identity1,
+ validPolicy,
+ )
+ require.Nil(t, errAddPolicy)
+
+ require.Equal(
+ t,
+ validPolicyID,
+ policyID,
+ )
+
+ errClose := localACP.Close()
+ require.Nil(t, errClose)
+
+ // Since nothing is persisted should allow adding same policy again.
+
+ localACP.Init(ctx, "")
+ errStart = localACP.Start(ctx)
+ require.Nil(t, errStart)
+
+ policyID, errAddPolicy = localACP.AddPolicy(
+ ctx,
+ identity1,
+ validPolicy,
+ )
+ require.Nil(t, errAddPolicy)
+ require.Equal(
+ t,
+ validPolicyID,
+ policyID,
+ )
+
+ errClose = localACP.Close()
+ require.Nil(t, errClose)
+}
+
+func Test_LocalACP_PersistentMemory_AddPolicy_CanNotCreateTwice(t *testing.T) {
+ acpPath := t.TempDir()
+ require.NotEqual(t, "", acpPath)
+
+ ctx := context.Background()
+ var localACP ACPLocal
+
+ localACP.Init(ctx, acpPath)
+ errStart := localACP.Start(ctx)
+ require.Nil(t, errStart)
+
+ policyID, errAddPolicy := localACP.AddPolicy(
+ ctx,
+ identity1,
+ validPolicy,
+ )
+ require.Nil(t, errAddPolicy)
+ require.Equal(
+ t,
+ validPolicyID,
+ policyID,
+ )
+
+ errClose := localACP.Close()
+ require.Nil(t, errClose)
+
+ // The above policy should remain persisted on restarting ACP.
+
+ localACP.Init(ctx, acpPath)
+ errStart = localACP.Start(ctx)
+ require.Nil(t, errStart)
+
+ // Should not allow us to create the same policy again as it exists already.
+ _, errAddPolicy = localACP.AddPolicy(
+ ctx,
+ identity1,
+ validPolicy,
+ )
+ require.Error(t, errAddPolicy)
+ require.ErrorIs(t, errAddPolicy, ErrFailedToAddPolicyWithACP)
+
+ errClose = localACP.Close()
+ require.Nil(t, errClose)
+}
+
+func Test_LocalACP_InMemory_ValidateResourceExistsOrNot_ErrIfDoesntExist(t *testing.T) {
+ ctx := context.Background()
+ var localACP ACPLocal
+
+ localACP.Init(ctx, "")
+ errStart := localACP.Start(ctx)
+ require.Nil(t, errStart)
+
+ policyID, errAddPolicy := localACP.AddPolicy(
+ ctx,
+ identity1,
+ validPolicy,
+ )
+ require.Nil(t, errAddPolicy)
+ require.Equal(
+ t,
+ validPolicyID,
+ policyID,
+ )
+
+ errValidateResourceExists := localACP.ValidateResourceExistsOnValidDPI(
+ ctx,
+ validPolicyID,
+ "users",
+ )
+ require.Nil(t, errValidateResourceExists)
+
+ errValidateResourceExists = localACP.ValidateResourceExistsOnValidDPI(
+ ctx,
+ validPolicyID,
+ "resourceDoesNotExist",
+ )
+ require.Error(t, errValidateResourceExists)
+ require.ErrorIs(t, errValidateResourceExists, ErrResourceDoesNotExistOnTargetPolicy)
+
+ errValidateResourceExists = localACP.ValidateResourceExistsOnValidDPI(
+ ctx,
+ "invalidPolicyID",
+ "resourceDoesNotExist",
+ )
+ require.Error(t, errValidateResourceExists)
+ require.ErrorIs(t, errValidateResourceExists, ErrPolicyDoesNotExistWithACP)
+
+ errClose := localACP.Close()
+ require.Nil(t, errClose)
+}
+
+func Test_LocalACP_PersistentMemory_ValidateResourceExistsOrNot_ErrIfDoesntExist(t *testing.T) {
+ acpPath := t.TempDir()
+ require.NotEqual(t, "", acpPath)
+
+ ctx := context.Background()
+ var localACP ACPLocal
+
+ localACP.Init(ctx, acpPath)
+ errStart := localACP.Start(ctx)
+ require.Nil(t, errStart)
+
+ policyID, errAddPolicy := localACP.AddPolicy(
+ ctx,
+ identity1,
+ validPolicy,
+ )
+ require.Nil(t, errAddPolicy)
+ require.Equal(
+ t,
+ validPolicyID,
+ policyID,
+ )
+
+ errValidateResourceExists := localACP.ValidateResourceExistsOnValidDPI(
+ ctx,
+ validPolicyID,
+ "users",
+ )
+ require.Nil(t, errValidateResourceExists)
+
+ // Resource should still exist even after a restart.
+ errClose := localACP.Close()
+ require.Nil(t, errClose)
+
+ localACP.Init(ctx, acpPath)
+ errStart = localACP.Start(ctx)
+ require.Nil(t, errStart)
+
+ // Do the same check after restart.
+ errValidateResourceExists = localACP.ValidateResourceExistsOnValidDPI(
+ ctx,
+ validPolicyID,
+ "users",
+ )
+ require.Nil(t, errValidateResourceExists)
+
+ errValidateResourceExists = localACP.ValidateResourceExistsOnValidDPI(
+ ctx,
+ validPolicyID,
+ "resourceDoesNotExist",
+ )
+ require.Error(t, errValidateResourceExists)
+ require.ErrorIs(t, errValidateResourceExists, ErrResourceDoesNotExistOnTargetPolicy)
+
+ errValidateResourceExists = localACP.ValidateResourceExistsOnValidDPI(
+ ctx,
+ "invalidPolicyID",
+ "resourceDoesNotExist",
+ )
+ require.Error(t, errValidateResourceExists)
+ require.ErrorIs(t, errValidateResourceExists, ErrPolicyDoesNotExistWithACP)
+
+ errClose = localACP.Close()
+ require.Nil(t, errClose)
+}
+
+func Test_LocalACP_InMemory_IsDocRegistered_TrueIfRegisteredFalseIfNotAndErrorOtherwise(t *testing.T) {
+ ctx := context.Background()
+ var localACP ACPLocal
+
+ localACP.Init(ctx, "")
+ errStart := localACP.Start(ctx)
+ require.Nil(t, errStart)
+
+ policyID, errAddPolicy := localACP.AddPolicy(
+ ctx,
+ identity1,
+ validPolicy,
+ )
+ require.Nil(t, errAddPolicy)
+ require.Equal(
+ t,
+ validPolicyID,
+ policyID,
+ )
+
+ // Invalid empty doc and empty resource can't be registered.
+ errRegisterDoc := localACP.RegisterDocObject(
+ ctx,
+ identity1,
+ validPolicyID,
+ "",
+ "",
+ )
+ require.Error(t, errRegisterDoc)
+ require.ErrorIs(t, errRegisterDoc, ErrFailedToRegisterDocWithACP)
+
+ // Check if an invalid empty doc and empty resource is registered.
+ isDocRegistered, errDocRegistered := localACP.IsDocRegistered(
+ ctx,
+ validPolicyID,
+ "",
+ "",
+ )
+ require.Error(t, errDocRegistered)
+ require.ErrorIs(t, errDocRegistered, ErrFailedToCheckIfDocIsRegisteredWithACP)
+ require.False(t, isDocRegistered)
+
+ // No documents are registered right now so return false.
+ isDocRegistered, errDocRegistered = localACP.IsDocRegistered(
+ ctx,
+ validPolicyID,
+ "users",
+ "documentID_XYZ",
+ )
+ require.Nil(t, errDocRegistered)
+ require.False(t, isDocRegistered)
+
+ // Register a document.
+ errRegisterDoc = localACP.RegisterDocObject(
+ ctx,
+ identity1,
+ validPolicyID,
+ "users",
+ "documentID_XYZ",
+ )
+ require.Nil(t, errRegisterDoc)
+
+ // Now it should be registered.
+ isDocRegistered, errDocRegistered = localACP.IsDocRegistered(
+ ctx,
+ validPolicyID,
+ "users",
+ "documentID_XYZ",
+ )
+
+ require.Nil(t, errDocRegistered)
+ require.True(t, isDocRegistered)
+
+ errClose := localACP.Close()
+ require.Nil(t, errClose)
+}
+
+func Test_LocalACP_PersistentMemory_IsDocRegistered_TrueIfRegisteredFalseIfNotAndErrorOtherwise(t *testing.T) {
+ acpPath := t.TempDir()
+ require.NotEqual(t, "", acpPath)
+
+ ctx := context.Background()
+ var localACP ACPLocal
+
+ localACP.Init(ctx, acpPath)
+ errStart := localACP.Start(ctx)
+ require.Nil(t, errStart)
+
+ policyID, errAddPolicy := localACP.AddPolicy(
+ ctx,
+ identity1,
+ validPolicy,
+ )
+ require.Nil(t, errAddPolicy)
+ require.Equal(
+ t,
+ validPolicyID,
+ policyID,
+ )
+
+ // Invalid empty doc and empty resource can't be registered.
+ errRegisterDoc := localACP.RegisterDocObject(
+ ctx,
+ identity1,
+ validPolicyID,
+ "",
+ "",
+ )
+ require.Error(t, errRegisterDoc)
+ require.ErrorIs(t, errRegisterDoc, ErrFailedToRegisterDocWithACP)
+
+ // Check if an invalid empty doc and empty resource is registered.
+ isDocRegistered, errDocRegistered := localACP.IsDocRegistered(
+ ctx,
+ validPolicyID,
+ "",
+ "",
+ )
+ require.Error(t, errDocRegistered)
+ require.ErrorIs(t, errDocRegistered, ErrFailedToCheckIfDocIsRegisteredWithACP)
+ require.False(t, isDocRegistered)
+
+ // No documents are registered right now so return false.
+ isDocRegistered, errDocRegistered = localACP.IsDocRegistered(
+ ctx,
+ validPolicyID,
+ "users",
+ "documentID_XYZ",
+ )
+ require.Nil(t, errDocRegistered)
+ require.False(t, isDocRegistered)
+
+ // Register a document.
+ errRegisterDoc = localACP.RegisterDocObject(
+ ctx,
+ identity1,
+ validPolicyID,
+ "users",
+ "documentID_XYZ",
+ )
+ require.Nil(t, errRegisterDoc)
+
+ // Now it should be registered.
+ isDocRegistered, errDocRegistered = localACP.IsDocRegistered(
+ ctx,
+ validPolicyID,
+ "users",
+ "documentID_XYZ",
+ )
+
+ require.Nil(t, errDocRegistered)
+ require.True(t, isDocRegistered)
+
+ // Should stay registered even after a restart.
+ errClose := localACP.Close()
+ require.Nil(t, errClose)
+
+ localACP.Init(ctx, acpPath)
+ errStart = localACP.Start(ctx)
+ require.Nil(t, errStart)
+
+ // Check after restart if it is still registered.
+ isDocRegistered, errDocRegistered = localACP.IsDocRegistered(
+ ctx,
+ validPolicyID,
+ "users",
+ "documentID_XYZ",
+ )
+
+ require.Nil(t, errDocRegistered)
+ require.True(t, isDocRegistered)
+
+ errClose = localACP.Close()
+ require.Nil(t, errClose)
+}
+
+func Test_LocalACP_InMemory_CheckDocAccess_TrueIfHaveAccessFalseIfNotErrorOtherwise(t *testing.T) {
+ ctx := context.Background()
+ var localACP ACPLocal
+
+ localACP.Init(ctx, "")
+ errStart := localACP.Start(ctx)
+ require.Nil(t, errStart)
+
+ policyID, errAddPolicy := localACP.AddPolicy(
+ ctx,
+ identity1,
+ validPolicy,
+ )
+ require.Nil(t, errAddPolicy)
+ require.Equal(
+ t,
+ validPolicyID,
+ policyID,
+ )
+
+ // Invalid empty arguments such that we can't check doc access.
+ hasAccess, errCheckDocAccess := localACP.CheckDocAccess(
+ ctx,
+ ReadPermission,
+ identity1,
+ validPolicyID,
+ "",
+ "",
+ )
+ require.Error(t, errCheckDocAccess)
+ require.ErrorIs(t, errCheckDocAccess, ErrFailedToVerifyDocAccessWithACP)
+ require.False(t, hasAccess)
+
+ // Check document access for a document that does not exist.
+ hasAccess, errCheckDocAccess = localACP.CheckDocAccess(
+ ctx,
+ ReadPermission,
+ identity1,
+ validPolicyID,
+ "users",
+ "documentID_XYZ",
+ )
+ require.Nil(t, errCheckDocAccess)
+ require.False(t, hasAccess)
+
+ // Register a document.
+ errRegisterDoc := localACP.RegisterDocObject(
+ ctx,
+ identity1,
+ validPolicyID,
+ "users",
+ "documentID_XYZ",
+ )
+ require.Nil(t, errRegisterDoc)
+
+ // Now check using correct identity if it has access.
+ hasAccess, errCheckDocAccess = localACP.CheckDocAccess(
+ ctx,
+ ReadPermission,
+ identity1,
+ validPolicyID,
+ "users",
+ "documentID_XYZ",
+ )
+ require.Nil(t, errCheckDocAccess)
+ require.True(t, hasAccess)
+
+ // Now check using wrong identity, it should not have access.
+ hasAccess, errCheckDocAccess = localACP.CheckDocAccess(
+ ctx,
+ ReadPermission,
+ identity2,
+ validPolicyID,
+ "users",
+ "documentID_XYZ",
+ )
+ require.Nil(t, errCheckDocAccess)
+ require.False(t, hasAccess)
+
+ errClose := localACP.Close()
+ require.Nil(t, errClose)
+}
+
+func Test_LocalACP_PersistentMemory_CheckDocAccess_TrueIfHaveAccessFalseIfNotErrorOtherwise(t *testing.T) {
+ acpPath := t.TempDir()
+ require.NotEqual(t, "", acpPath)
+
+ ctx := context.Background()
+ var localACP ACPLocal
+
+ localACP.Init(ctx, acpPath)
+ errStart := localACP.Start(ctx)
+ require.Nil(t, errStart)
+
+ policyID, errAddPolicy := localACP.AddPolicy(
+ ctx,
+ identity1,
+ validPolicy,
+ )
+ require.Nil(t, errAddPolicy)
+ require.Equal(
+ t,
+ validPolicyID,
+ policyID,
+ )
+
+ // Invalid empty arguments such that we can't check doc access.
+ hasAccess, errCheckDocAccess := localACP.CheckDocAccess(
+ ctx,
+ ReadPermission,
+ identity1,
+ validPolicyID,
+ "",
+ "",
+ )
+ require.Error(t, errCheckDocAccess)
+ require.ErrorIs(t, errCheckDocAccess, ErrFailedToVerifyDocAccessWithACP)
+ require.False(t, hasAccess)
+
+ // Check document access for a document that does not exist.
+ hasAccess, errCheckDocAccess = localACP.CheckDocAccess(
+ ctx,
+ ReadPermission,
+ identity1,
+ validPolicyID,
+ "users",
+ "documentID_XYZ",
+ )
+ require.Nil(t, errCheckDocAccess)
+ require.False(t, hasAccess)
+
+ // Register a document.
+ errRegisterDoc := localACP.RegisterDocObject(
+ ctx,
+ identity1,
+ validPolicyID,
+ "users",
+ "documentID_XYZ",
+ )
+ require.Nil(t, errRegisterDoc)
+
+ // Now check using correct identity if it has access.
+ hasAccess, errCheckDocAccess = localACP.CheckDocAccess(
+ ctx,
+ ReadPermission,
+ identity1,
+ validPolicyID,
+ "users",
+ "documentID_XYZ",
+ )
+ require.Nil(t, errCheckDocAccess)
+ require.True(t, hasAccess)
+
+ // Now check using wrong identity, it should not have access.
+ hasAccess, errCheckDocAccess = localACP.CheckDocAccess(
+ ctx,
+ ReadPermission,
+ identity2,
+ validPolicyID,
+ "users",
+ "documentID_XYZ",
+ )
+ require.Nil(t, errCheckDocAccess)
+ require.False(t, hasAccess)
+
+ // Identities should continue having their correct behaviour and access even after a restart.
+ errClose := localACP.Close()
+ require.Nil(t, errClose)
+
+ localACP.Init(ctx, acpPath)
+ errStart = localACP.Start(ctx)
+ require.Nil(t, errStart)
+
+ // Now check again after the restart using correct identity if it still has access.
+ hasAccess, errCheckDocAccess = localACP.CheckDocAccess(
+ ctx,
+ ReadPermission,
+ identity1,
+ validPolicyID,
+ "users",
+ "documentID_XYZ",
+ )
+ require.Nil(t, errCheckDocAccess)
+ require.True(t, hasAccess)
+
+ // Now check again after restart using wrong identity, it should continue to not have access.
+ hasAccess, errCheckDocAccess = localACP.CheckDocAccess(
+ ctx,
+ ReadPermission,
+ identity2,
+ validPolicyID,
+ "users",
+ "documentID_XYZ",
+ )
+ require.Nil(t, errCheckDocAccess)
+ require.False(t, hasAccess)
+
+ errClose = localACP.Close()
+ require.Nil(t, errClose)
+}
diff --git a/acp/doc.go b/acp/doc.go
new file mode 100644
index 0000000000..3fd60dd147
--- /dev/null
+++ b/acp/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+/*
+Package acp utilizes the sourcehub acp module to bring its functionality
+to defradb. This package also helps avoid leaking direct sourcehub
+references throughout the code base, and eases swapping between the local
+use case and a more global on-sourcehub use case.
+*/
+package acp
diff --git a/acp/dpi.go b/acp/dpi.go
new file mode 100644
index 0000000000..85da972131
--- /dev/null
+++ b/acp/dpi.go
@@ -0,0 +1,73 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package acp
+
+import (
+ "strings"
+)
+
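+// DPIPermission is a valid DefraDB Policy Interface (DPI) permission.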
+type DPIPermission int
+
+// Valid DefraDB Policy Interface Permission Type.
+const (
+ ReadPermission DPIPermission = iota
+ WritePermission
+)
+
+// List of all valid DPI permissions; the order of permissions in this list must match
+// the above defined ordering such that iota matches the index position within the list.
+var dpiRequiredPermissions = []string{
+ "read",
+ "write",
+}
+
+func (dpiPermission DPIPermission) String() string {
+ return dpiRequiredPermissions[dpiPermission]
+}
+
+const requiredRegistererRelationName string = "owner"
+
+// validateDPIExpressionOfRequiredPermission validates that the expression under the
+// permission is valid. Moreover, DPI requires that for every required permission the
+// expression starts with "owner", followed by a space or symbol, and then the rest of
+// the expression. This is important because even if the policy has the required
+// permissions under the resource, it's still possible that those permissions are not
+// granted to the "owner" relation. This validation helps users not shoot themselves
+// in the foot.
+//
+// Learn more about the DefraDB Policy Interface [ACP](/acp/README.md), where more
+// detailed valid and invalid `expr` (expression) examples can be found.
+func validateDPIExpressionOfRequiredPermission(expression string, requiredPermission string) error {
+ exprNoSpace := strings.ReplaceAll(expression, " ", "")
+
+ if !strings.HasPrefix(exprNoSpace, requiredRegistererRelationName) {
+ return newErrExprOfRequiredPermissionMustStartWithRelation(
+ requiredPermission,
+ requiredRegistererRelationName,
+ )
+ }
+
+ restOfTheExpr := exprNoSpace[len(requiredRegistererRelationName):]
+ if len(restOfTheExpr) != 0 {
+ c := restOfTheExpr[0]
+ // The first non-space character after the required relation name MUST be a `+`.
+ // We enforce this here because other set operations are not applied to the
+ // registerer relation anyway.
+ if c != '+' {
+ return newErrExprOfRequiredPermissionHasInvalidChar(
+ requiredPermission,
+ requiredRegistererRelationName,
+ c,
+ )
+ }
+ }
+
+ return nil
+}
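
To make the expression rule above concrete, here is a minimal runnable sketch (not part of this diff): the `validate` helper mirrors the unexported function's logic under the same "owner"-prefix assumption, and the example expressions are illustrative.

package main

import (
	"fmt"
	"strings"
)

// validate mirrors validateDPIExpressionOfRequiredPermission: strip spaces,
// require the "owner" prefix, and require that the first character after
// it (if any) is '+'.
func validate(expr string) error {
	e := strings.ReplaceAll(expr, " ", "")
	if !strings.HasPrefix(e, "owner") {
		return fmt.Errorf("expr must start with owner")
	}
	if rest := e[len("owner"):]; len(rest) != 0 && rest[0] != '+' {
		return fmt.Errorf("invalid character %q after owner", rest[0])
	}
	return nil
}

func main() {
	for _, expr := range []string{
		"owner",          // valid
		"owner + reader", // valid: grants the permission to owner and reader
		"reader + owner", // invalid: does not start with owner
		"owner - reader", // invalid: '-' after owner is rejected
	} {
		fmt.Printf("%-18q -> %v\n", expr, validate(expr))
	}
}
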
diff --git a/acp/errors.go b/acp/errors.go
new file mode 100644
index 0000000000..307b32f5ad
--- /dev/null
+++ b/acp/errors.go
@@ -0,0 +1,207 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package acp
+
+import (
+ "github.com/sourcenetwork/defradb/errors"
+)
+
+const (
+ errInitializationOfACPFailed = "initialization of acp failed"
+ errStartingACPInEmptyPath = "starting acp in an empty path"
+ errFailedToAddPolicyWithACP = "failed to add policy with acp"
+ errFailedToRegisterDocWithACP = "failed to register document with acp"
+ errFailedToCheckIfDocIsRegisteredWithACP = "failed to check if doc is registered with acp"
+ errFailedToVerifyDocAccessWithACP = "failed to verify doc access with acp"
+
+ errObjectDidNotRegister = "no-op while registering object (already exists or error) with acp"
+ errNoPolicyArgs = "missing policy arguments, must have both id and resource"
+
+ errPolicyIDMustNotBeEmpty = "policyID must not be empty"
+ errPolicyDoesNotExistWithACP = "policyID specified does not exist with acp"
+ errPolicyValidationFailedWithACP = "policyID validation through acp failed"
+
+ errResourceNameMustNotBeEmpty = "resource name must not be empty"
+ errResourceDoesNotExistOnTargetPolicy = "resource does not exist on the specified policy"
+ errResourceIsMissingRequiredPermission = "resource is missing required permission on policy"
+
+ errExprOfRequiredPermMustStartWithRelation = "expr of required permission must start with required relation"
+ errExprOfRequiredPermHasInvalidChar = "expr of required permission has invalid character after relation"
+)
+
+var (
+ ErrInitializationOfACPFailed = errors.New(errInitializationOfACPFailed)
+ ErrFailedToAddPolicyWithACP = errors.New(errFailedToAddPolicyWithACP)
+ ErrFailedToRegisterDocWithACP = errors.New(errFailedToRegisterDocWithACP)
+ ErrFailedToCheckIfDocIsRegisteredWithACP = errors.New(errFailedToCheckIfDocIsRegisteredWithACP)
+ ErrFailedToVerifyDocAccessWithACP = errors.New(errFailedToVerifyDocAccessWithACP)
+ ErrPolicyDoesNotExistWithACP = errors.New(errPolicyDoesNotExistWithACP)
+
+ ErrResourceDoesNotExistOnTargetPolicy = errors.New(errResourceDoesNotExistOnTargetPolicy)
+
+ ErrPolicyDataMustNotBeEmpty = errors.New("policy data can not be empty")
+ ErrPolicyCreatorMustNotBeEmpty = errors.New("policy creator can not be empty")
+ ErrObjectDidNotRegister = errors.New(errObjectDidNotRegister)
+ ErrNoPolicyArgs = errors.New(errNoPolicyArgs)
+ ErrPolicyIDMustNotBeEmpty = errors.New(errPolicyIDMustNotBeEmpty)
+ ErrResourceNameMustNotBeEmpty = errors.New(errResourceNameMustNotBeEmpty)
+)
+
+func NewErrInitializationOfACPFailed(
+ inner error,
+ Type string,
+ path string,
+) error {
+ return errors.Wrap(
+ errInitializationOfACPFailed,
+ inner,
+ errors.NewKV("Type", Type),
+ errors.NewKV("Path", path),
+ )
+}
+
+func NewErrFailedToAddPolicyWithACP(
+ inner error,
+ Type string,
+ creatorID string,
+) error {
+ return errors.Wrap(
+ errFailedToAddPolicyWithACP,
+ inner,
+ errors.NewKV("Type", Type),
+ errors.NewKV("CreatorID", creatorID),
+ )
+}
+
+func NewErrFailedToRegisterDocWithACP(
+ inner error,
+ Type string,
+ policyID string,
+ creatorID string,
+ resourceName string,
+ docID string,
+) error {
+ return errors.Wrap(
+ errFailedToRegisterDocWithACP,
+ inner,
+ errors.NewKV("Type", Type),
+ errors.NewKV("PolicyID", policyID),
+ errors.NewKV("CreatorID", creatorID),
+ errors.NewKV("ResourceName", resourceName),
+ errors.NewKV("DocID", docID),
+ )
+}
+
+func NewErrFailedToCheckIfDocIsRegisteredWithACP(
+ inner error,
+ Type string,
+ policyID string,
+ resourceName string,
+ docID string,
+) error {
+ return errors.Wrap(
+ errFailedToCheckIfDocIsRegisteredWithACP,
+ inner,
+ errors.NewKV("Type", Type),
+ errors.NewKV("PolicyID", policyID),
+ errors.NewKV("ResourceName", resourceName),
+ errors.NewKV("DocID", docID),
+ )
+}
+
+func NewErrFailedToVerifyDocAccessWithACP(
+ inner error,
+ Type string,
+ policyID string,
+ actorID string,
+ resourceName string,
+ docID string,
+) error {
+ return errors.Wrap(
+ errFailedToVerifyDocAccessWithACP,
+ inner,
+ errors.NewKV("Type", Type),
+ errors.NewKV("PolicyID", policyID),
+ errors.NewKV("ActorID", actorID),
+ errors.NewKV("ResourceName", resourceName),
+ errors.NewKV("DocID", docID),
+ )
+}
+
+func newErrPolicyDoesNotExistWithACP(
+ inner error,
+ policyID string,
+) error {
+ return errors.Wrap(
+ errPolicyDoesNotExistWithACP,
+ inner,
+ errors.NewKV("PolicyID", policyID),
+ )
+}
+
+func newErrPolicyValidationFailedWithACP(
+ inner error,
+ policyID string,
+) error {
+ return errors.Wrap(
+ errPolicyValidationFailedWithACP,
+ inner,
+ errors.NewKV("PolicyID", policyID),
+ )
+}
+
+func newErrResourceDoesNotExistOnTargetPolicy(
+ resourceName string,
+ policyID string,
+) error {
+ return errors.New(
+ errResourceDoesNotExistOnTargetPolicy,
+ errors.NewKV("PolicyID", policyID),
+ errors.NewKV("ResourceName", resourceName),
+ )
+}
+
+func newErrResourceIsMissingRequiredPermission(
+ resourceName string,
+ permission string,
+ policyID string,
+) error {
+ return errors.New(
+ errResourceIsMissingRequiredPermission,
+ errors.NewKV("PolicyID", policyID),
+ errors.NewKV("ResourceName", resourceName),
+ errors.NewKV("Permission", permission),
+ )
+}
+
+func newErrExprOfRequiredPermissionMustStartWithRelation(
+ permission string,
+ relation string,
+) error {
+ return errors.New(
+ errExprOfRequiredPermMustStartWithRelation,
+ errors.NewKV("Permission", permission),
+ errors.NewKV("Relation", relation),
+ )
+}
+
+func newErrExprOfRequiredPermissionHasInvalidChar(
+ permission string,
+ relation string,
+ char byte,
+) error {
+ return errors.New(
+ errExprOfRequiredPermHasInvalidChar,
+ errors.NewKV("Permission", permission),
+ errors.NewKV("Relation", relation),
+ errors.NewKV("Character", string(char)),
+ )
+}
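
As a hedged usage sketch of the constructors above: a hypothetical caller inside package acp (the `check` callback and the label values are illustrative; only the `errors.Wrap`/`errors.NewKV` behaviour shown in this file is assumed, plus a "context" import).

// verifyAccess is a hypothetical caller; check stands in for the real acp call.
func verifyAccess(
	ctx context.Context,
	check func(context.Context) (bool, error),
) (bool, error) {
	hasAccess, err := check(ctx)
	if err != nil {
		// The inner error is preserved by errors.Wrap, and the KV pairs
		// surface the acp context alongside the sentinel message.
		return false, NewErrFailedToVerifyDocAccessWithACP(
			err,
			"local",           // Type (illustrative label)
			"examplePolicyID", // PolicyID
			"exampleActorID",  // ActorID
			"users",           // ResourceName
			"exampleDocID",    // DocID
		)
	}
	return hasAccess, nil
}
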
diff --git a/acp/identity/identity.go b/acp/identity/identity.go
new file mode 100644
index 0000000000..108c183748
--- /dev/null
+++ b/acp/identity/identity.go
@@ -0,0 +1,41 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+/*
+Package identity provides defradb identity.
+*/
+
+package identity
+
+import "github.com/sourcenetwork/immutable"
+
+// Identity is the unique identifier for an actor.
+type Identity string
+
+var (
+ // None is an empty identity.
+ None = immutable.None[Identity]()
+)
+
+// New makes a new identity if the input is not empty; otherwise it returns None.
+func New(identity string) immutable.Option[Identity] {
+ // TODO-ACP: There will be more validation once sourcehub gets some utilities.
+ // A validation function would then do the validation, likely outside this function.
+ // https://github.com/sourcenetwork/defradb/issues/2358
+ if identity == "" {
+ return None
+ }
+ return immutable.Some(Identity(identity))
+}
+
+// String returns the string representation of the identity.
+func (i Identity) String() string {
+ return string(i)
+}
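
A minimal sketch of how callers consume the option returned by identity.New, assuming the standard HasValue/Value accessors of immutable.Option (the cosmos address is the example one used elsewhere in this diff):

package main

import (
	"fmt"

	"github.com/sourcenetwork/defradb/acp/identity"
)

func main() {
	// An empty input yields identity.None; anything else yields Some.
	for _, raw := range []string{"", "cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j"} {
		ident := identity.New(raw)
		if ident.HasValue() {
			fmt.Println("actor:", ident.Value().String())
		} else {
			fmt.Println("no identity provided")
		}
	}
}
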
diff --git a/cli/acp.go b/cli/acp.go
new file mode 100644
index 0000000000..30705ac908
--- /dev/null
+++ b/cli/acp.go
@@ -0,0 +1,29 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package cli
+
+import (
+ "github.com/spf13/cobra"
+)
+
+func MakeACPCommand() *cobra.Command {
+ var cmd = &cobra.Command{
+ Use: "acp",
+ Short: "Interact with the access control system of a DefraDB node",
+ Long: `Interact with the access control system of a DefraDB node.
+
+Learn more about [ACP](/acp/README.md)
+`,
+ }
+
+ return cmd
+}
diff --git a/cli/acp_policy.go b/cli/acp_policy.go
new file mode 100644
index 0000000000..92ae9321f0
--- /dev/null
+++ b/cli/acp_policy.go
@@ -0,0 +1,25 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package cli
+
+import (
+ "github.com/spf13/cobra"
+)
+
+func MakeACPPolicyCommand() *cobra.Command {
+ var cmd = &cobra.Command{
+ Use: "policy",
+ Short: "Interact with the acp policy features of DefraDB instance",
+ Long: `Interact with the acp policy features of DefraDB instance`,
+ }
+
+ return cmd
+}
diff --git a/cli/acp_policy_add.go b/cli/acp_policy_add.go
new file mode 100644
index 0000000000..bca5e95abd
--- /dev/null
+++ b/cli/acp_policy_add.go
@@ -0,0 +1,119 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package cli
+
+import (
+ "io"
+ "os"
+
+ "github.com/spf13/cobra"
+)
+
+func MakeACPPolicyAddCommand() *cobra.Command {
+ const fileFlagLong string = "file"
+ const fileFlagShort string = "f"
+
+ var policyFile string
+
+ var cmd = &cobra.Command{
+ Use: "add [-i --identity] [policy]",
+ Short: "Add new policy",
+ Long: `Add new policy
+
+Notes:
+ - A policy can not be added without specifying an identity.
+ - ACP must be available (i.e. ACP can not be disabled).
+ - A non-DPI policy will be accepted (it will be registered with the acp system).
+ - But only a valid DPI policyID & resource can be specified on a schema.
+ - DPI validation happens when attempting to add a schema with '@policy'.
+ - Learn more about [ACP & DPI Rules](/acp/README.md)
+
+Example: add from an argument string:
+ defradb client acp policy add -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j '
+description: A Valid DefraDB Policy Interface
+
+actor:
+ name: actor
+
+resources:
+ users:
+ permissions:
+ read:
+ expr: owner + reader
+ write:
+ expr: owner
+
+ relations:
+ owner:
+ types:
+ - actor
+ reader:
+ types:
+ - actor
+'
+
+Example: add from file:
+ defradb client acp policy add -i cosmos17r39df0hdcrgnmmw4mvu7qgk5nu888c7uvv37y -f policy.yml
+
+Example: add from file, verbose flags:
+ defradb client acp policy add --identity cosmos1kpw734v54g0t0d8tcye8ee5jc3gld0tcr2q473 --file policy.yml
+
+Example: add from stdin:
+ cat policy.yml | defradb client acp policy add -
+
+`,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ // TODO-ACP: Ensure here (before going through the acp system) that the required
+ // identity argument is valid; if it is valid then keep proceeding, otherwise return:
+ // `NewErrRequiredFlagInvalid(identityFlagLongRequired, identityFlagShortRequired)`
+ // Issue: https://github.com/sourcenetwork/defradb/issues/2358
+
+ // Handle policy argument.
+ extraArgsProvided := len(args)
+ var policy string
+ switch {
+ case policyFile != "":
+ data, err := os.ReadFile(policyFile)
+ if err != nil {
+ return err
+ }
+ policy = string(data)
+
+ case extraArgsProvided > 0 && args[extraArgsProvided-1] == "-":
+ data, err := io.ReadAll(cmd.InOrStdin())
+ if err != nil {
+ return err
+ }
+ policy = string(data)
+
+ case extraArgsProvided > 0:
+ policy = args[0]
+
+ default:
+ return ErrPolicyFileArgCanNotBeEmpty
+ }
+
+ db := mustGetContextDB(cmd)
+ policyResult, err := db.AddPolicy(
+ cmd.Context(),
+ policy,
+ )
+
+ if err != nil {
+ return err
+ }
+
+ return writeJSON(cmd, policyResult)
+ },
+ }
+ cmd.Flags().StringVarP(&policyFile, fileFlagLong, fileFlagShort, "", "File to load a policy from")
+ return cmd
+}
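
The RunE above resolves its input in a fixed order: an explicit --file wins, then a trailing "-" reads stdin, then the first positional argument, and otherwise the call is rejected. A hypothetical helper distilling that order (the name is illustrative; the error value is the one defined in cli/errors.go):

// resolvePolicyInput mirrors the input-resolution order of the add command.
func resolvePolicyInput(cmd *cobra.Command, args []string, policyFile string) (string, error) {
	switch {
	case policyFile != "":
		data, err := os.ReadFile(policyFile)
		if err != nil {
			return "", err
		}
		return string(data), nil
	case len(args) > 0 && args[len(args)-1] == "-":
		data, err := io.ReadAll(cmd.InOrStdin())
		if err != nil {
			return "", err
		}
		return string(data), nil
	case len(args) > 0:
		return args[0], nil
	default:
		return "", ErrPolicyFileArgCanNotBeEmpty
	}
}
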
diff --git a/cli/cli.go b/cli/cli.go
index 4cdb8c443b..38209a9f69 100644
--- a/cli/cli.go
+++ b/cli/cli.go
@@ -16,10 +16,10 @@ package cli
import (
"github.com/spf13/cobra"
- "github.com/sourcenetwork/defradb/logging"
+ "github.com/sourcenetwork/corelog"
)
-var log = logging.MustNewLogger("cli")
+var log = corelog.NewLogger("cli")
// NewDefraCommand returns the root command instanciated with its tree of subcommands.
func NewDefraCommand() *cobra.Command {
@@ -62,6 +62,16 @@ func NewDefraCommand() *cobra.Command {
schema_migrate,
)
+ policy := MakeACPPolicyCommand()
+ policy.AddCommand(
+ MakeACPPolicyAddCommand(),
+ )
+
+ acp := MakeACPCommand()
+ acp.AddCommand(
+ policy,
+ )
+
view := MakeViewCommand()
view.AddCommand(
MakeViewAddCommand(),
@@ -95,6 +105,7 @@ func NewDefraCommand() *cobra.Command {
MakeCollectionUpdateCommand(),
MakeCollectionCreateCommand(),
MakeCollectionDescribeCommand(),
+ MakeCollectionPatchCommand(),
)
client := MakeClientCommand()
@@ -102,6 +113,7 @@ func NewDefraCommand() *cobra.Command {
MakeDumpCommand(),
MakeRequestCommand(),
schema,
+ acp,
view,
index,
p2p,
diff --git a/cli/client.go b/cli/client.go
index 532712e8f8..06460ca70d 100644
--- a/cli/client.go
+++ b/cli/client.go
@@ -16,6 +16,7 @@ import (
func MakeClientCommand() *cobra.Command {
var txID uint64
+ var identity string
var cmd = &cobra.Command{
Use: "client",
Short: "Interact with a DefraDB node",
@@ -28,12 +29,16 @@ Execute queries, add schema types, obtain node info, etc.`,
if err := setContextConfig(cmd); err != nil {
return err
}
+ if err := setContextIdentity(cmd, identity); err != nil {
+ return err
+ }
if err := setContextTransaction(cmd, txID); err != nil {
return err
}
- return setContextStore(cmd)
+ return setContextDB(cmd)
},
}
+ cmd.PersistentFlags().StringVarP(&identity, "identity", "i", "", "ACP Identity")
cmd.PersistentFlags().Uint64Var(&txID, "tx", 0, "Transaction ID")
return cmd
}
diff --git a/cli/collection.go b/cli/collection.go
index 23ef9194ae..cdf3d41f5a 100644
--- a/cli/collection.go
+++ b/cli/collection.go
@@ -17,11 +17,11 @@ import (
"github.com/spf13/cobra"
"github.com/sourcenetwork/defradb/client"
- "github.com/sourcenetwork/defradb/datastore"
)
func MakeCollectionCommand() *cobra.Command {
var txID uint64
+ var identity string
var name string
var schemaRoot string
var versionID string
@@ -38,10 +38,13 @@ func MakeCollectionCommand() *cobra.Command {
if err := setContextConfig(cmd); err != nil {
return err
}
+ if err := setContextIdentity(cmd, identity); err != nil {
+ return err
+ }
if err := setContextTransaction(cmd, txID); err != nil {
return err
}
- if err := setContextStore(cmd); err != nil {
+ if err := setContextDB(cmd); err != nil {
return err
}
store := mustGetContextStore(cmd)
@@ -71,16 +74,13 @@ func MakeCollectionCommand() *cobra.Command {
}
col := cols[0]
- if tx, ok := cmd.Context().Value(txContextKey).(datastore.Txn); ok {
- col = col.WithTxn(tx)
- }
-
ctx := context.WithValue(cmd.Context(), colContextKey, col)
cmd.SetContext(ctx)
return nil
},
}
cmd.PersistentFlags().Uint64Var(&txID, "tx", 0, "Transaction ID")
+ cmd.PersistentFlags().StringVarP(&identity, "identity", "i", "", "ACP Identity")
cmd.PersistentFlags().StringVar(&name, "name", "", "Collection name")
cmd.PersistentFlags().StringVar(&schemaRoot, "schema", "", "Collection schema Root")
cmd.PersistentFlags().StringVar(&versionID, "version", "", "Collection version ID")
diff --git a/cli/collection_create.go b/cli/collection_create.go
index efeee61494..df7d8794b5 100644
--- a/cli/collection_create.go
+++ b/cli/collection_create.go
@@ -22,29 +22,27 @@ import (
func MakeCollectionCreateCommand() *cobra.Command {
var file string
var cmd = &cobra.Command{
- Use: "create ",
+ Use: "create [-i --identity] ",
Short: "Create a new document.",
Long: `Create a new document.
-Example: create from string
+Example: create from string:
defradb client collection create --name User '{ "name": "Bob" }'
-Example: create multiple from string
+Example: create from string, with identity:
+ defradb client collection create -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name User '{ "name": "Bob" }'
+
+Example: create multiple from string:
defradb client collection create --name User '[{ "name": "Alice" }, { "name": "Bob" }]'
-Example: create from file
+Example: create from file:
defradb client collection create --name User -f document.json
-Example: create from stdin
+Example: create from stdin:
cat document.json | defradb client collection create --name User -
`,
Args: cobra.RangeArgs(0, 1),
RunE: func(cmd *cobra.Command, args []string) error {
- col, ok := tryGetContextCollection(cmd)
- if !ok {
- return cmd.Usage()
- }
-
var docData []byte
switch {
case file != "":
@@ -65,15 +63,20 @@ Example: create from stdin
return ErrNoDocOrFile
}
+ col, ok := tryGetContextCollection(cmd)
+ if !ok {
+ return cmd.Usage()
+ }
+
if client.IsJSONArray(docData) {
- docs, err := client.NewDocsFromJSON(docData, col.Schema())
+ docs, err := client.NewDocsFromJSON(docData, col.Definition())
if err != nil {
return err
}
return col.CreateMany(cmd.Context(), docs)
}
- doc, err := client.NewDocFromJSON(docData, col.Schema())
+ doc, err := client.NewDocFromJSON(docData, col.Definition())
if err != nil {
return err
}
diff --git a/cli/collection_delete.go b/cli/collection_delete.go
index d1f945d9ae..a9776d1985 100644
--- a/cli/collection_delete.go
+++ b/cli/collection_delete.go
@@ -17,17 +17,20 @@ import (
)
func MakeCollectionDeleteCommand() *cobra.Command {
- var argDocIDs []string
+ var argDocID string
var filter string
var cmd = &cobra.Command{
- Use: "delete [--filter --docID ]",
+ Use: "delete [-i --identity] [--filter --docID ]",
Short: "Delete documents by docID or filter.",
Long: `Delete documents by docID or filter and lists the number of documents deleted.
-Example: delete by docID(s)
- defradb client collection delete --name User --docID bae-123,bae-456
+Example: delete by docID:
+ defradb client collection delete --name User --docID bae-123
-Example: delete by filter
+Example: delete by docID with identity:
+ defradb client collection delete -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name User --docID bae-123
+
+Example: delete by filter:
defradb client collection delete --name User --filter '{ "_gte": { "points": 100 } }'
`,
RunE: func(cmd *cobra.Command, args []string) error {
@@ -37,30 +40,13 @@ Example: delete by filter
}
switch {
- case len(argDocIDs) == 1:
- docID, err := client.NewDocIDFromString(argDocIDs[0])
- if err != nil {
- return err
- }
- res, err := col.DeleteWithDocID(cmd.Context(), docID)
- if err != nil {
- return err
- }
- return writeJSON(cmd, res)
- case len(argDocIDs) > 1:
- docIDs := make([]client.DocID, len(argDocIDs))
- for i, v := range argDocIDs {
- docID, err := client.NewDocIDFromString(v)
- if err != nil {
- return err
- }
- docIDs[i] = docID
- }
- res, err := col.DeleteWithDocIDs(cmd.Context(), docIDs)
+ case argDocID != "":
+ docID, err := client.NewDocIDFromString(argDocID)
if err != nil {
return err
}
- return writeJSON(cmd, res)
+ _, err = col.Delete(cmd.Context(), docID)
+ return err
case filter != "":
res, err := col.DeleteWithFilter(cmd.Context(), filter)
if err != nil {
@@ -72,7 +58,7 @@ Example: delete by filter
}
},
}
- cmd.Flags().StringSliceVar(&argDocIDs, "docID", nil, "Document ID")
+ cmd.Flags().StringVar(&argDocID, "docID", "", "Document ID")
cmd.Flags().StringVar(&filter, "filter", "", "Document filter")
return cmd
}
diff --git a/cli/collection_get.go b/cli/collection_get.go
index 55c84d6289..9ad5566f62 100644
--- a/cli/collection_get.go
+++ b/cli/collection_get.go
@@ -19,12 +19,15 @@ import (
func MakeCollectionGetCommand() *cobra.Command {
var showDeleted bool
var cmd = &cobra.Command{
- Use: "get [--show-deleted]",
+ Use: "get [-i --identity] [--show-deleted] ",
Short: "View document fields.",
Long: `View document fields.
Example:
defradb client collection get --name User bae-123
+
+Example: get a private document (identity required):
+ defradb client collection get -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name User bae-123
`,
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
diff --git a/cli/collection_list_doc_ids.go b/cli/collection_list_doc_ids.go
index 7112a88817..168bb74a5a 100644
--- a/cli/collection_list_doc_ids.go
+++ b/cli/collection_list_doc_ids.go
@@ -18,12 +18,15 @@ import (
func MakeCollectionListDocIDsCommand() *cobra.Command {
var cmd = &cobra.Command{
- Use: "docIDs",
+ Use: "docIDs [-i --identity]",
Short: "List all document IDs (docIDs).",
Long: `List all document IDs (docIDs).
-Example:
+Example: list all docID(s):
defradb client collection docIDs --name User
+
+Example: list all docID(s), with an identity:
+ defradb client collection docIDs -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name User
`,
RunE: func(cmd *cobra.Command, args []string) error {
col, ok := tryGetContextCollection(cmd)
diff --git a/cli/collection_patch.go b/cli/collection_patch.go
new file mode 100644
index 0000000000..49d5a91305
--- /dev/null
+++ b/cli/collection_patch.go
@@ -0,0 +1,69 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package cli
+
+import (
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/spf13/cobra"
+)
+
+func MakeCollectionPatchCommand() *cobra.Command {
+ var patchFile string
+ var cmd = &cobra.Command{
+ Use: "patch [patch]",
+ Short: "Patch existing collection descriptions",
+ Long: `Patch existing collection descriptions.
+
+Uses JSON Patch to modify collection descriptions.
+
+Example: patch from an argument string:
+ defradb client collection patch '[{ "op": "add", "path": "...", "value": {...} }]'
+
+Example: patch from file:
+ defradb client collection patch -p patch.json
+
+Example: patch from stdin:
+ cat patch.json | defradb client collection patch -
+
+To learn more about the DefraDB GraphQL Schema Language, refer to https://docs.source.network.`,
+ Args: cobra.RangeArgs(0, 1),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ store := mustGetContextStore(cmd)
+
+ var patch string
+ switch {
+ case patchFile != "":
+ data, err := os.ReadFile(patchFile)
+ if err != nil {
+ return err
+ }
+ patch = string(data)
+ case len(args) > 0 && args[0] == "-":
+ data, err := io.ReadAll(cmd.InOrStdin())
+ if err != nil {
+ return err
+ }
+ patch = string(data)
+ case len(args) == 1:
+ patch = args[0]
+ default:
+ return fmt.Errorf("patch cannot be empty")
+ }
+
+ return store.PatchCollection(cmd.Context(), patch)
+ },
+ }
+ cmd.Flags().StringVarP(&patchFile, "patch-file", "p", "", "File to load a patch from")
+ return cmd
+}
diff --git a/cli/collection_update.go b/cli/collection_update.go
index 42354948a9..3e676edce9 100644
--- a/cli/collection_update.go
+++ b/cli/collection_update.go
@@ -17,24 +17,28 @@ import (
)
func MakeCollectionUpdateCommand() *cobra.Command {
- var argDocIDs []string
+ var argDocID string
var filter string
var updater string
var cmd = &cobra.Command{
- Use: "update [--filter --docID --updater ] ",
+ Use: "update [-i --identity] [--filter --docID --updater ] ",
Short: "Update documents by docID or filter.",
Long: `Update documents by docID or filter.
-Example: update from string
+Example: update from string:
defradb client collection update --name User --docID bae-123 '{ "name": "Bob" }'
-Example: update by filter
+Example: update by filter:
defradb client collection update --name User \
--filter '{ "_gte": { "points": 100 } }' --updater '{ "verified": true }'
-Example: update by docIDs
+Example: update by docID:
defradb client collection update --name User \
- --docID bae-123,bae-456 --updater '{ "verified": true }'
+ --docID bae-123 --updater '{ "verified": true }'
+
+Example: update private docID, with identity:
+ defradb client collection update -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name User \
+ --docID bae-123 --updater '{ "verified": true }'
`,
Args: cobra.RangeArgs(0, 1),
RunE: func(cmd *cobra.Command, args []string) error {
@@ -44,38 +48,14 @@ Example: update by docIDs
}
switch {
- case len(argDocIDs) == 1 && updater != "":
- docID, err := client.NewDocIDFromString(argDocIDs[0])
- if err != nil {
- return err
- }
- res, err := col.UpdateWithDocID(cmd.Context(), docID, updater)
- if err != nil {
- return err
- }
- return writeJSON(cmd, res)
- case len(argDocIDs) > 1 && updater != "":
- docIDs := make([]client.DocID, len(argDocIDs))
- for i, v := range argDocIDs {
- docID, err := client.NewDocIDFromString(v)
- if err != nil {
- return err
- }
- docIDs[i] = docID
- }
- res, err := col.UpdateWithDocIDs(cmd.Context(), docIDs, updater)
- if err != nil {
- return err
- }
- return writeJSON(cmd, res)
case filter != "" && updater != "":
res, err := col.UpdateWithFilter(cmd.Context(), filter, updater)
if err != nil {
return err
}
return writeJSON(cmd, res)
- case len(argDocIDs) == 1 && len(args) == 1:
- docID, err := client.NewDocIDFromString(argDocIDs[0])
+ case argDocID != "" && len(args) == 1:
+ docID, err := client.NewDocIDFromString(argDocID)
if err != nil {
return err
}
@@ -92,7 +72,7 @@ Example: update by docIDs
}
},
}
- cmd.Flags().StringSliceVar(&argDocIDs, "docID", nil, "Document ID")
+ cmd.Flags().StringVar(&argDocID, "docID", "", "Document ID")
cmd.Flags().StringVar(&filter, "filter", "", "Document filter")
cmd.Flags().StringVar(&updater, "updater", "", "Document updater")
return cmd
diff --git a/cli/config.go b/cli/config.go
index bb57a8cb3d..fd275a2d01 100644
--- a/cli/config.go
+++ b/cli/config.go
@@ -15,10 +15,9 @@ import (
"path/filepath"
"strings"
+ "github.com/sourcenetwork/corelog"
"github.com/spf13/pflag"
"github.com/spf13/viper"
-
- "github.com/sourcenetwork/defradb/logging"
)
const (
@@ -41,11 +40,13 @@ var configPaths = []string{
// configFlags is a mapping of config keys to cli flags to bind to.
var configFlags = map[string]string{
- "log.level": "loglevel",
- "log.output": "logoutput",
- "log.format": "logformat",
- "log.stacktrace": "logtrace",
- "log.nocolor": "lognocolor",
+ "log.level": "log-level",
+ "log.output": "log-output",
+ "log.format": "log-format",
+ "log.stacktrace": "log-stacktrace",
+ "log.source": "log-source",
+ "log.overrides": "log-overrides",
+ "log.nocolor": "log-no-color",
"api.address": "url",
"datastore.maxtxnretries": "max-txn-retries",
"datastore.store": "store",
@@ -125,14 +126,17 @@ func loadConfig(rootdir string, flags *pflag.FlagSet) (*viper.Viper, error) {
}
}
- logCfg := loggingConfig(cfg.Sub("log"))
- logCfg.OverridesByLoggerName = make(map[string]logging.Config)
+ // set default logging config
+ corelog.SetConfig(corelog.Config{
+ Level: cfg.GetString("log.level"),
+ Format: cfg.GetString("log.format"),
+ Output: cfg.GetString("log.output"),
+ EnableStackTrace: cfg.GetBool("log.stacktrace"),
+ EnableSource: cfg.GetBool("log.source"),
+ })
- // apply named logging overrides
- for key := range cfg.GetStringMap("log.overrides") {
- logCfg.OverridesByLoggerName[key] = loggingConfig(cfg.Sub("log.overrides." + key))
- }
- logging.SetConfig(logCfg)
+ // set logging config overrides
+ corelog.SetConfigOverrides(cfg.GetString("log.overrides"))
return cfg, nil
}
@@ -147,39 +151,3 @@ func bindConfigFlags(cfg *viper.Viper, flags *pflag.FlagSet) error {
}
return nil
}
-
-// loggingConfig returns a new logging config from the given config.
-func loggingConfig(cfg *viper.Viper) logging.Config {
- var level int8
- switch value := cfg.GetString("level"); value {
- case configLogLevelDebug:
- level = logging.Debug
- case configLogLevelInfo:
- level = logging.Info
- case configLogLevelError:
- level = logging.Error
- case configLogLevelFatal:
- level = logging.Fatal
- default:
- level = logging.Info
- }
-
- var format logging.EncoderFormat
- switch value := cfg.GetString("format"); value {
- case configLogFormatJSON:
- format = logging.JSON
- case configLogFormatCSV:
- format = logging.CSV
- default:
- format = logging.CSV
- }
-
- return logging.Config{
- Level: logging.NewLogLevelOption(level),
- EnableStackTrace: logging.NewEnableStackTraceOption(cfg.GetBool("stacktrace")),
- DisableColor: logging.NewDisableColorOption(cfg.GetBool("nocolor")),
- EncoderFormat: logging.NewEncoderFormatOption(format),
- OutputPaths: []string{cfg.GetString("output")},
- EnableCaller: logging.NewEnableCallerOption(cfg.GetBool("caller")),
- }
-}
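
A standalone sketch of the corelog wiring that loadConfig now performs; the values and the override string are illustrative, and only SetConfig, SetConfigOverrides, NewLogger, and InfoContext from this diff are assumed to exist:

package main

import (
	"context"

	"github.com/sourcenetwork/corelog"
)

func main() {
	// Defaults mirroring the config keys read in loadConfig.
	corelog.SetConfig(corelog.Config{
		Level:            "info",
		Format:           "text",
		Output:           "stderr",
		EnableStackTrace: false,
		EnableSource:     false,
	})

	// Named overrides come in as a single string, e.g. raising the "cli"
	// logger to debug while leaving everything else at the defaults.
	corelog.SetConfigOverrides("cli,level=debug")

	log := corelog.NewLogger("cli")
	log.InfoContext(context.Background(), "corelog configured")
}
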
diff --git a/cli/config_test.go b/cli/config_test.go
index 210743477c..39a17d60fd 100644
--- a/cli/config_test.go
+++ b/cli/config_test.go
@@ -53,9 +53,10 @@ func TestLoadConfigNotExist(t *testing.T) {
assert.Equal(t, []string{}, cfg.GetStringSlice("net.peers"))
assert.Equal(t, "info", cfg.GetString("log.level"))
- assert.Equal(t, false, cfg.GetBool("log.stacktrace"))
- assert.Equal(t, "csv", cfg.GetString("log.format"))
assert.Equal(t, "stderr", cfg.GetString("log.output"))
+ assert.Equal(t, "text", cfg.GetString("log.format"))
+ assert.Equal(t, false, cfg.GetBool("log.stacktrace"))
+ assert.Equal(t, false, cfg.GetBool("log.source"))
+ assert.Equal(t, "", cfg.GetString("log.overrides"))
assert.Equal(t, false, cfg.GetBool("log.nocolor"))
- assert.Equal(t, false, cfg.GetBool("log.caller"))
}
diff --git a/cli/dump.go b/cli/dump.go
index a3d155605b..76b36bab99 100644
--- a/cli/dump.go
+++ b/cli/dump.go
@@ -12,8 +12,6 @@ package cli
import (
"github.com/spf13/cobra"
-
- "github.com/sourcenetwork/defradb/client"
)
func MakeDumpCommand() *cobra.Command {
@@ -21,7 +19,7 @@ func MakeDumpCommand() *cobra.Command {
Use: "dump",
Short: "Dump the contents of DefraDB node-side",
RunE: func(cmd *cobra.Command, _ []string) (err error) {
- db := cmd.Context().Value(dbContextKey).(client.DB)
+ db := mustGetContextDB(cmd)
return db.PrintDump(cmd.Context())
},
}
diff --git a/cli/errors.go b/cli/errors.go
index bb124bc7f9..02cd252b59 100644
--- a/cli/errors.go
+++ b/cli/errors.go
@@ -11,25 +11,37 @@
package cli
import (
+ "fmt"
+
"github.com/sourcenetwork/defradb/errors"
)
const (
errInvalidLensConfig string = "invalid lens configuration"
errSchemaVersionNotOfSchema string = "the given schema version is from a different schema"
+ errRequiredFlag string = "the required flag [--%s|-%s] is %s"
)
var (
- ErrNoDocOrFile = errors.New("document or file must be defined")
- ErrInvalidDocument = errors.New("invalid document")
- ErrNoDocIDOrFilter = errors.New("docID or filter must be defined")
- ErrInvalidExportFormat = errors.New("invalid export format")
- ErrNoLensConfig = errors.New("lens config cannot be empty")
- ErrInvalidLensConfig = errors.New("invalid lens configuration")
- ErrSchemaVersionNotOfSchema = errors.New(errSchemaVersionNotOfSchema)
- ErrViewAddMissingArgs = errors.New("please provide a base query and output SDL for this view")
+ ErrNoDocOrFile = errors.New("document or file must be defined")
+ ErrInvalidDocument = errors.New("invalid document")
+ ErrNoDocIDOrFilter = errors.New("docID or filter must be defined")
+ ErrInvalidExportFormat = errors.New("invalid export format")
+ ErrNoLensConfig = errors.New("lens config cannot be empty")
+ ErrInvalidLensConfig = errors.New("invalid lens configuration")
+ ErrSchemaVersionNotOfSchema = errors.New(errSchemaVersionNotOfSchema)
+ ErrViewAddMissingArgs = errors.New("please provide a base query and output SDL for this view")
+ ErrPolicyFileArgCanNotBeEmpty = errors.New("policy file argument can not be empty")
)
+func NewErrRequiredFlagEmpty(longName string, shortName string) error {
+ return errors.New(fmt.Sprintf(errRequiredFlag, longName, shortName, "empty"))
+}
+
+func NewErrRequiredFlagInvalid(longName string, shortName string) error {
+ return errors.New(fmt.Sprintf(errRequiredFlag, longName, shortName, "invalid"))
+}
+
func NewErrInvalidLensConfig(inner error) error {
return errors.Wrap(errInvalidLensConfig, inner)
}
diff --git a/cli/index_create.go b/cli/index_create.go
index bfe5ec64c2..0d724da15b 100644
--- a/cli/index_create.go
+++ b/cli/index_create.go
@@ -14,7 +14,6 @@ import (
"github.com/spf13/cobra"
"github.com/sourcenetwork/defradb/client"
- "github.com/sourcenetwork/defradb/datastore"
)
func MakeIndexCreateCommand() *cobra.Command {
@@ -52,9 +51,6 @@ Example: create a named index for 'Users' collection on 'name' field:
if err != nil {
return err
}
- if tx, ok := cmd.Context().Value(txContextKey).(datastore.Txn); ok {
- col = col.WithTxn(tx)
- }
desc, err = col.CreateIndex(cmd.Context(), desc)
if err != nil {
return err
diff --git a/cli/index_drop.go b/cli/index_drop.go
index 96f007268d..5dd069b5da 100644
--- a/cli/index_drop.go
+++ b/cli/index_drop.go
@@ -12,8 +12,6 @@ package cli
import (
"github.com/spf13/cobra"
-
- "github.com/sourcenetwork/defradb/datastore"
)
func MakeIndexDropCommand() *cobra.Command {
@@ -34,9 +32,6 @@ Example: drop the index 'UsersByName' for 'Users' collection:
if err != nil {
return err
}
- if tx, ok := cmd.Context().Value(txContextKey).(datastore.Txn); ok {
- col = col.WithTxn(tx)
- }
return col.DropIndex(cmd.Context(), nameArg)
},
}
diff --git a/cli/index_list.go b/cli/index_list.go
index bf1fd21251..481acb7d37 100644
--- a/cli/index_list.go
+++ b/cli/index_list.go
@@ -12,8 +12,6 @@ package cli
import (
"github.com/spf13/cobra"
-
- "github.com/sourcenetwork/defradb/datastore"
)
func MakeIndexListCommand() *cobra.Command {
@@ -38,9 +36,6 @@ Example: show all index for 'Users' collection:
if err != nil {
return err
}
- if tx, ok := cmd.Context().Value(txContextKey).(datastore.Txn); ok {
- col = col.WithTxn(tx)
- }
indexes, err := col.GetIndexes(cmd.Context())
if err != nil {
return err
diff --git a/cli/request.go b/cli/request.go
index d5e37e79a3..3dba0c197d 100644
--- a/cli/request.go
+++ b/cli/request.go
@@ -27,7 +27,7 @@ const (
func MakeRequestCommand() *cobra.Command {
var filePath string
var cmd = &cobra.Command{
- Use: "query [query request]",
+ Use: "query [-i --identity] [request]",
Short: "Send a DefraDB GraphQL query request",
Long: `Send a DefraDB GraphQL query request to the database.
@@ -37,6 +37,9 @@ A query request can be sent as a single argument. Example command:
Do a query request from a file by using the '-f' flag. Example command:
defradb client query -f request.graphql
+Do a query request from a file and with an identity. Example command:
+ defradb client query -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j -f request.graphql
+
Or it can be sent via stdin by using the '-' special syntax. Example command:
cat request.graphql | defradb client query -
@@ -45,8 +48,6 @@ with the database more conveniently.
To learn more about the DefraDB GraphQL Query Language, refer to https://docs.source.network.`,
RunE: func(cmd *cobra.Command, args []string) error {
- store := mustGetContextStore(cmd)
-
var request string
switch {
case filePath != "":
@@ -68,6 +69,8 @@ To learn more about the DefraDB GraphQL Query Language, refer to https://docs.so
if request == "" {
return errors.New("request cannot be empty")
}
+
+ store := mustGetContextStore(cmd)
result := store.ExecRequest(cmd.Context(), request)
var errors []string
diff --git a/cli/root.go b/cli/root.go
index e4ba349f76..8fc8baf628 100644
--- a/cli/root.go
+++ b/cli/root.go
@@ -38,31 +38,43 @@ Start a DefraDB node, interact with a local or remote node, and much more.
)
cmd.PersistentFlags().String(
- "loglevel",
+ "log-level",
"info",
"Log level to use. Options are debug, info, error, fatal",
)
cmd.PersistentFlags().String(
- "logoutput",
+ "log-output",
"stderr",
- "Log output path",
+ "Log output path. Options are stderr or stdout.",
)
cmd.PersistentFlags().String(
- "logformat",
- "csv",
- "Log format to use. Options are csv, json",
+ "log-format",
+ "text",
+ "Log format to use. Options are text or json",
)
cmd.PersistentFlags().Bool(
- "logtrace",
+ "log-stacktrace",
false,
"Include stacktrace in error and fatal logs",
)
cmd.PersistentFlags().Bool(
- "lognocolor",
+ "log-source",
+ false,
+ "Include source location in logs",
+ )
+
+ cmd.PersistentFlags().String(
+ "log-overrides",
+ "",
+ "Logger config overrides. Format ,=,...;,...",
+ )
+
+ cmd.PersistentFlags().Bool(
+ "log-no-color",
false,
"Disable colored log output",
)
diff --git a/cli/schema_add.go b/cli/schema_add.go
index f987d062df..e81896322d 100644
--- a/cli/schema_add.go
+++ b/cli/schema_add.go
@@ -25,6 +25,11 @@ func MakeSchemaAddCommand() *cobra.Command {
Short: "Add new schema",
Long: `Add new schema.
+A schema object with a '@policy(id:".." resource: "..")' link will only be accepted if:
+ - ACP is available (i.e. ACP is not disabled).
+ - The specified resource adheres to the Document Access Control DPI Rules.
+ - Learn more about [ACP & DPI Rules](/acp/README.md)
+
Example: add from an argument string:
defradb client schema add 'type Foo { ... }'
diff --git a/cli/schema_migration_down.go b/cli/schema_migration_down.go
index 1d7622257c..a49f359694 100644
--- a/cli/schema_migration_down.go
+++ b/cli/schema_migration_down.go
@@ -17,8 +17,6 @@ import (
"github.com/sourcenetwork/immutable/enumerable"
"github.com/spf13/cobra"
-
- "github.com/sourcenetwork/defradb/datastore"
)
func MakeSchemaMigrationDownCommand() *cobra.Command {
@@ -67,12 +65,7 @@ Example: migrate from stdin
if err := json.Unmarshal(srcData, &src); err != nil {
return err
}
- lens := store.LensRegistry()
- if tx, ok := cmd.Context().Value(txContextKey).(datastore.Txn); ok {
- lens = lens.WithTxn(tx)
- }
-
- out, err := lens.MigrateDown(cmd.Context(), enumerable.New(src), collectionID)
+ out, err := store.LensRegistry().MigrateDown(cmd.Context(), enumerable.New(src), collectionID)
if err != nil {
return err
}
diff --git a/cli/schema_migration_reload.go b/cli/schema_migration_reload.go
index 4266b3ec3f..8ffb5542f1 100644
--- a/cli/schema_migration_reload.go
+++ b/cli/schema_migration_reload.go
@@ -12,8 +12,6 @@ package cli
import (
"github.com/spf13/cobra"
-
- "github.com/sourcenetwork/defradb/datastore"
)
func MakeSchemaMigrationReloadCommand() *cobra.Command {
@@ -23,12 +21,7 @@ func MakeSchemaMigrationReloadCommand() *cobra.Command {
Long: `Reload the schema migrations within DefraDB`,
RunE: func(cmd *cobra.Command, args []string) error {
store := mustGetContextStore(cmd)
-
- lens := store.LensRegistry()
- if tx, ok := cmd.Context().Value(txContextKey).(datastore.Txn); ok {
- lens = lens.WithTxn(tx)
- }
- return lens.ReloadLenses(cmd.Context())
+ return store.LensRegistry().ReloadLenses(cmd.Context())
},
}
return cmd
diff --git a/cli/schema_migration_up.go b/cli/schema_migration_up.go
index 577b87d4c7..4473c45911 100644
--- a/cli/schema_migration_up.go
+++ b/cli/schema_migration_up.go
@@ -17,8 +17,6 @@ import (
"github.com/sourcenetwork/immutable/enumerable"
"github.com/spf13/cobra"
-
- "github.com/sourcenetwork/defradb/datastore"
)
func MakeSchemaMigrationUpCommand() *cobra.Command {
@@ -67,12 +65,7 @@ Example: migrate from stdin
if err := json.Unmarshal(srcData, &src); err != nil {
return err
}
- lens := store.LensRegistry()
- if tx, ok := cmd.Context().Value(txContextKey).(datastore.Txn); ok {
- lens = lens.WithTxn(tx)
- }
-
- out, err := lens.MigrateUp(cmd.Context(), enumerable.New(src), collectionID)
+ out, err := store.LensRegistry().MigrateUp(cmd.Context(), enumerable.New(src), collectionID)
if err != nil {
return err
}
diff --git a/cli/schema_patch.go b/cli/schema_patch.go
index 23f425396d..cf9224d204 100644
--- a/cli/schema_patch.go
+++ b/cli/schema_patch.go
@@ -37,7 +37,7 @@ Example: patch from an argument string:
defradb client schema patch '[{ "op": "add", "path": "...", "value": {...} }]' '{"lenses": [...'
Example: patch from file:
- defradb client schema patch -f patch.json
+ defradb client schema patch -p patch.json
Example: patch from stdin:
cat patch.json | defradb client schema patch -
diff --git a/cli/server_dump.go b/cli/server_dump.go
index eb364a247f..767b86f364 100644
--- a/cli/server_dump.go
+++ b/cli/server_dump.go
@@ -24,7 +24,7 @@ func MakeServerDumpCmd() *cobra.Command {
Short: "Dumps the state of the entire database",
RunE: func(cmd *cobra.Command, _ []string) error {
cfg := mustGetContextConfig(cmd)
- log.FeedbackInfo(cmd.Context(), "Dumping DB state...")
+ log.InfoContext(cmd.Context(), "Dumping DB state...")
if cfg.GetString("datastore.store") != configStoreBadger {
return errors.New("server-side dump is only supported for the Badger datastore")
diff --git a/cli/start.go b/cli/start.go
index d4e789cbc6..ca9267e7e9 100644
--- a/cli/start.go
+++ b/cli/start.go
@@ -50,6 +50,10 @@ func MakeStartCommand() *cobra.Command {
dbOpts := []db.Option{
db.WithUpdateEvents(),
db.WithMaxRetries(cfg.GetInt("datastore.MaxTxnRetries")),
+ // TODO-ACP: In the future, when admin signatures are in and we add support for the
+ // --no-acp flag, we can allow starting the db without acp. Currently that can only
+ // be done programmatically.
+ // https://github.com/sourcenetwork/defradb/issues/2271
+ db.WithACPInMemory(),
}
netOpts := []net.NodeOpt{
@@ -84,12 +88,17 @@ func MakeStartCommand() *cobra.Command {
// Running with memory store mode will always generate a random key.
// Adding support for an ephemeral mode and moving the key to the
// config would solve both of these issues.
- rootdir := mustGetContextRootDir(cmd)
- key, err := loadOrGeneratePrivateKey(filepath.Join(rootdir, "data", "key"))
+ rootDir := mustGetContextRootDir(cmd)
+ key, err := loadOrGeneratePrivateKey(filepath.Join(rootDir, "data", "key"))
if err != nil {
return err
}
netOpts = append(netOpts, net.WithPrivateKey(key))
+
+ // TODO-ACP: In the future, when admin signatures are in and we add support for the
+ // --no-acp flag, we can allow starting the db without acp. Currently that can only
+ // be done programmatically.
+ // https://github.com/sourcenetwork/defradb/issues/2271
+ dbOpts = append(dbOpts, db.WithACP(rootDir))
}
opts := []node.NodeOpt{
@@ -108,11 +117,11 @@ func MakeStartCommand() *cobra.Command {
defer func() {
if err := n.Close(cmd.Context()); err != nil {
- log.FeedbackErrorE(cmd.Context(), "Stopping DefraDB", err)
+ log.ErrorContextE(cmd.Context(), "Stopping DefraDB", err)
}
}()
- log.FeedbackInfo(cmd.Context(), "Starting DefraDB")
+ log.InfoContext(cmd.Context(), "Starting DefraDB")
if err := n.Start(cmd.Context()); err != nil {
return err
}
@@ -122,9 +131,9 @@ func MakeStartCommand() *cobra.Command {
select {
case <-cmd.Context().Done():
- log.FeedbackInfo(cmd.Context(), "Received context cancellation; shutting down...")
+ log.InfoContext(cmd.Context(), "Received context cancellation; shutting down...")
case <-signalCh:
- log.FeedbackInfo(cmd.Context(), "Received interrupt; shutting down...")
+ log.InfoContext(cmd.Context(), "Received interrupt; shutting down...")
}
return nil
diff --git a/cli/tx_create.go b/cli/tx_create.go
index da239b6943..5190ba20f7 100644
--- a/cli/tx_create.go
+++ b/cli/tx_create.go
@@ -13,7 +13,6 @@ package cli
import (
"github.com/spf13/cobra"
- "github.com/sourcenetwork/defradb/client"
"github.com/sourcenetwork/defradb/datastore"
)
@@ -25,7 +24,7 @@ func MakeTxCreateCommand() *cobra.Command {
Short: "Create a new DefraDB transaction.",
Long: `Create a new DefraDB transaction.`,
RunE: func(cmd *cobra.Command, args []string) (err error) {
- db := cmd.Context().Value(dbContextKey).(client.DB)
+ db := mustGetContextDB(cmd)
var tx datastore.Txn
if concurrent {
diff --git a/cli/utils.go b/cli/utils.go
index caeb282606..25af57528b 100644
--- a/cli/utils.go
+++ b/cli/utils.go
@@ -20,8 +20,9 @@ import (
"github.com/spf13/cobra"
"github.com/spf13/viper"
+ acpIdentity "github.com/sourcenetwork/defradb/acp/identity"
"github.com/sourcenetwork/defradb/client"
- "github.com/sourcenetwork/defradb/datastore"
+ "github.com/sourcenetwork/defradb/db"
"github.com/sourcenetwork/defradb/http"
)
@@ -32,17 +33,8 @@ var (
cfgContextKey = contextKey("cfg")
// rootDirContextKey is the context key for the root directory.
rootDirContextKey = contextKey("rootDir")
- // txContextKey is the context key for the datastore.Txn
- //
- // This will only be set if a transaction id is specified.
- txContextKey = contextKey("tx")
// dbContextKey is the context key for the client.DB
dbContextKey = contextKey("db")
- // storeContextKey is the context key for the client.Store
- //
- // If a transaction exists, all operations will be executed
- // in the current transaction context.
- storeContextKey = contextKey("store")
// colContextKey is the context key for the client.Collection
//
// If a transaction exists, all operations will be executed
@@ -50,11 +42,18 @@ var (
colContextKey = contextKey("col")
)
+// mustGetContextDB returns the db for the current command context.
+//
+// If a db is not set in the current context this function panics.
+func mustGetContextDB(cmd *cobra.Command) client.DB {
+ return cmd.Context().Value(dbContextKey).(client.DB)
+}
+
// mustGetContextStore returns the store for the current command context.
//
// If a store is not set in the current context this function panics.
func mustGetContextStore(cmd *cobra.Command) client.Store {
- return cmd.Context().Value(storeContextKey).(client.Store)
+ return cmd.Context().Value(dbContextKey).(client.Store)
}
// mustGetContextP2P returns the p2p implementation for the current command context.
@@ -85,6 +84,18 @@ func tryGetContextCollection(cmd *cobra.Command) (client.Collection, bool) {
return col, ok
}
+// setContextDB sets the db for the current command context.
+func setContextDB(cmd *cobra.Command) error {
+ cfg := mustGetContextConfig(cmd)
+ db, err := http.NewClient(cfg.GetString("api.address"))
+ if err != nil {
+ return err
+ }
+ ctx := context.WithValue(cmd.Context(), dbContextKey, db)
+ cmd.SetContext(ctx)
+ return nil
+}
+
// setContextConfig sets the config for the current command context.
func setContextConfig(cmd *cobra.Command) error {
rootdir := mustGetContextRootDir(cmd)
@@ -108,24 +119,18 @@ func setContextTransaction(cmd *cobra.Command, txId uint64) error {
if err != nil {
return err
}
- ctx := context.WithValue(cmd.Context(), txContextKey, tx)
+ ctx := db.SetContextTxn(cmd.Context(), tx)
cmd.SetContext(ctx)
return nil
}
-// setContextStore sets the store for the current command context.
-func setContextStore(cmd *cobra.Command) error {
- cfg := mustGetContextConfig(cmd)
- db, err := http.NewClient(cfg.GetString("api.address"))
- if err != nil {
- return err
- }
- ctx := context.WithValue(cmd.Context(), dbContextKey, db)
- if tx, ok := ctx.Value(txContextKey).(datastore.Txn); ok {
- ctx = context.WithValue(ctx, storeContextKey, db.WithTxn(tx))
- } else {
- ctx = context.WithValue(ctx, storeContextKey, db)
+// setContextIdentity sets the identity for the current command context.
+func setContextIdentity(cmd *cobra.Command, identity string) error {
+ // TODO-ACP: `https://github.com/sourcenetwork/defradb/issues/2358` do the validation here.
+ if identity == "" {
+ return nil
}
+ ctx := db.SetContextIdentity(cmd.Context(), acpIdentity.New(identity))
cmd.SetContext(ctx)
return nil
}
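
Putting the helpers above together, the client commands chain them in a fixed order in PersistentPreRunE. A condensed sketch, mirroring cli/client.go earlier in this diff (requires the cobra import already present in this file):

// clientPreRun condenses the ordering used by the client commands:
// config first, then identity, then transaction, and finally the db handle.
func clientPreRun(cmd *cobra.Command, identity string, txID uint64) error {
	if err := setContextConfig(cmd); err != nil {
		return err
	}
	if err := setContextIdentity(cmd, identity); err != nil {
		return err
	}
	if err := setContextTransaction(cmd, txID); err != nil {
		return err
	}
	return setContextDB(cmd)
}
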
diff --git a/client/README.md b/client/README.md
new file mode 100644
index 0000000000..ec2cf7efcd
--- /dev/null
+++ b/client/README.md
@@ -0,0 +1,3 @@
+The `client` package is the primary access point for interacting with an embedded DefraDB instance.
+
+[Data definition overview](./data_definition.md) - How the shapes of documents are defined and grouped.
diff --git a/client/collection.go b/client/collection.go
index 58b53c3af0..38c309a0e8 100644
--- a/client/collection.go
+++ b/client/collection.go
@@ -14,8 +14,6 @@ import (
"context"
"github.com/sourcenetwork/immutable"
-
- "github.com/sourcenetwork/defradb/datastore"
)
// Collection represents a defradb collection.
@@ -46,12 +44,12 @@ type Collection interface {
// Create a new document.
//
// Will verify the DocID/CID to ensure that the new document is correctly formatted.
- Create(context.Context, *Document) error
+ Create(ctx context.Context, doc *Document) error
// CreateMany new documents.
//
// Will verify the DocIDs/CIDs to ensure that the new documents are correctly formatted.
- CreateMany(context.Context, []*Document) error
+ CreateMany(ctx context.Context, docs []*Document) error
// Update an existing document with the new values.
//
@@ -59,100 +57,54 @@ type Collection interface {
// Any field that is nil/empty that hasn't called Clear will be ignored.
//
// Will return a ErrDocumentNotFound error if the given document is not found.
- Update(context.Context, *Document) error
+ Update(ctx context.Context, docs *Document) error
// Save the given document in the database.
//
// If a document exists with the given DocID it will update it. Otherwise a new document
// will be created.
- Save(context.Context, *Document) error
+ Save(ctx context.Context, doc *Document) error
// Delete will attempt to delete a document by DocID.
//
// Will return true if a deletion is successful, and return false along with an error
// if it cannot. If the document doesn't exist, then it will return false and a ErrDocumentNotFound error.
- // This operation will hard-delete all state relating to the given DocID. This includes data, block, and head storage.
- Delete(context.Context, DocID) (bool, error)
+ // This operation will hard-delete all state relating to the given DocID.
+ // This includes data, block, and head storage.
+ Delete(ctx context.Context, docID DocID) (bool, error)
// Exists checks if a given document exists with supplied DocID.
//
// Will return true if a matching document exists, otherwise will return false.
- Exists(context.Context, DocID) (bool, error)
-
- // UpdateWith updates a target document using the given updater type.
- //
- // Target can be a Filter statement, a single DocID, a single document,
- // an array of DocIDs, or an array of documents.
- // It is recommended to use the respective typed versions of Update
- // (e.g. UpdateWithFilter or UpdateWithDocID) over this function if you can.
- //
- // Returns an ErrInvalidUpdateTarget error if the target type is not supported.
- // Returns an ErrInvalidUpdater error if the updater type is not supported.
- UpdateWith(ctx context.Context, target any, updater string) (*UpdateResult, error)
+ Exists(ctx context.Context, docID DocID) (bool, error)
// UpdateWithFilter updates using a filter to target documents for update.
//
// The provided updater must be a string Patch, string Merge Patch, a parsed Patch, or parsed Merge Patch
// else an ErrInvalidUpdater will be returned.
- UpdateWithFilter(ctx context.Context, filter any, updater string) (*UpdateResult, error)
-
- // UpdateWithDocID updates using a DocID to target a single document for update.
- //
- // The provided updater must be a string Patch, string Merge Patch, a parsed Patch, or parsed Merge Patch
- // else an ErrInvalidUpdater will be returned.
- //
- // Returns an ErrDocumentNotFound if a document matching the given DocID is not found.
- UpdateWithDocID(ctx context.Context, docID DocID, updater string) (*UpdateResult, error)
-
- // UpdateWithDocIDs updates documents matching the given DocIDs.
- //
- // The provided updater must be a string Patch, string Merge Patch, a parsed Patch, or parsed Merge Patch
- // else an ErrInvalidUpdater will be returned.
- //
- // Returns an ErrDocumentNotFound if a document is not found for any given DocID.
- UpdateWithDocIDs(context.Context, []DocID, string) (*UpdateResult, error)
-
- // DeleteWith deletes a target document.
- //
- // Target can be a Filter statement, a single DocID, a single document, an array of DocIDs,
- // or an array of documents. It is recommended to use the respective typed versions of Delete
- // (e.g. DeleteWithFilter or DeleteWithDocID) over this function if you can.
- // This operation will soft-delete documents related to the given DocID and update the composite block
- // with a status of `Deleted`.
- //
- // Returns an ErrInvalidDeleteTarget if the target type is not supported.
- DeleteWith(ctx context.Context, target any) (*DeleteResult, error)
+ UpdateWithFilter(
+ ctx context.Context,
+ filter any,
+ updater string,
+ ) (*UpdateResult, error)
// DeleteWithFilter deletes documents matching the given filter.
//
// This operation will soft-delete documents related to the given filter and update the composite block
// with a status of `Deleted`.
- DeleteWithFilter(ctx context.Context, filter any) (*DeleteResult, error)
-
- // DeleteWithDocID deletes using a DocID to target a single document for delete.
- //
- // This operation will soft-delete documents related to the given DocID and update the composite block
- // with a status of `Deleted`.
- //
- // Returns an ErrDocumentNotFound if a document matching the given DocID is not found.
- DeleteWithDocID(context.Context, DocID) (*DeleteResult, error)
-
- // DeleteWithDocIDs deletes documents matching the given DocIDs.
- //
- // This operation will soft-delete documents related to the given DocIDs and update the composite block
- // with a status of `Deleted`.
- //
- // Returns an ErrDocumentNotFound if a document is not found for any given DocID.
- DeleteWithDocIDs(context.Context, []DocID) (*DeleteResult, error)
+ DeleteWithFilter(
+ ctx context.Context,
+ filter any,
+ ) (*DeleteResult, error)
// Get returns the document with the given DocID.
//
// Returns an ErrDocumentNotFound if a document matching the given DocID is not found.
- Get(ctx context.Context, docID DocID, showDeleted bool) (*Document, error)
-
- // WithTxn returns a new instance of the collection, with a transaction
- // handle instead of a raw DB handle.
- WithTxn(datastore.Txn) Collection
+ Get(
+ ctx context.Context,
+ docID DocID,
+ showDeleted bool,
+ ) (*Document, error)
// GetAllDocIDs returns all the document IDs that exist in the collection.
GetAllDocIDs(ctx context.Context) (<-chan DocIDResult, error)
@@ -162,6 +114,7 @@ type Collection interface {
// `IndexDescription.Name` must start with a letter or an underscore and can
// only contain letters, numbers, and underscores.
// If the name of the index is not provided, it will be generated.
+ // WARNING: This method can not create an index for a collection that has a policy.
CreateIndex(context.Context, IndexDescription) (IndexDescription, error)
// DropIndex drops an index from the collection.
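
With the batch-by-docID helpers removed from the interface above, multi-document operations go through the *WithFilter variants. A hedged caller-side sketch (the filter string reuses the CLI example from this diff; the Count field on DeleteResult is an assumption; requires context, fmt, and client imports):

// deleteHighScorers soft-deletes every document matching the filter.
func deleteHighScorers(ctx context.Context, col client.Collection) error {
	res, err := col.DeleteWithFilter(ctx, `{ "_gte": { "points": 100 } }`)
	if err != nil {
		return err
	}
	// Assumption: DeleteResult exposes a Count of soft-deleted documents.
	fmt.Println("documents deleted:", res.Count)
	return nil
}
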
diff --git a/client/descriptions.go b/client/collection_description.go
similarity index 50%
rename from client/descriptions.go
rename to client/collection_description.go
index dd12e9cf00..aa22bf7121 100644
--- a/client/descriptions.go
+++ b/client/collection_description.go
@@ -1,4 +1,4 @@
-// Copyright 2022 Democratized Data Foundation
+// Copyright 2024 Democratized Data Foundation
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
@@ -60,75 +60,25 @@ type CollectionDescription struct {
// - [CollectionSource]
Sources []any
- // Fields contains the fields within this Collection.
+ // Fields contains the fields local to the node within this Collection.
+ //
+ // Most fields defined here will also be present on the [SchemaDescription]. Notable
+ // exceptions to this are the fields of the (optional) secondary side of a relation,
+ // which are local only and will not be present on the [SchemaDescription].
Fields []CollectionFieldDescription
// Indexes contains the secondary indexes that this Collection has.
Indexes []IndexDescription
-}
-// IDString returns the collection ID as a string.
-func (col CollectionDescription) IDString() string {
- return fmt.Sprint(col.ID)
-}
-
-// GetFieldByName returns the field for the given field name. If such a field is found it
-// will return it and true, if it is not found it will return false.
-func (col CollectionDescription) GetFieldByName(fieldName string) (CollectionFieldDescription, bool) {
- for _, field := range col.Fields {
- if field.Name == fieldName {
- return field, true
- }
- }
- return CollectionFieldDescription{}, false
-}
-
-// GetFieldByName returns the field for the given field name. If such a field is found it
-// will return it and true, if it is not found it will return false.
-func (s SchemaDescription) GetFieldByName(fieldName string) (SchemaFieldDescription, bool) {
- for _, field := range s.Fields {
- if field.Name == fieldName {
- return field, true
- }
- }
- return SchemaFieldDescription{}, false
-}
-
-// GetFieldByRelation returns the field that supports the relation of the given name.
-func (col CollectionDescription) GetFieldByRelation(
- relationName string,
- otherCollectionName string,
- otherFieldName string,
- schema *SchemaDescription,
-) (SchemaFieldDescription, bool) {
- for _, field := range schema.Fields {
- if field.RelationName == relationName &&
- !(col.Name.Value() == otherCollectionName && otherFieldName == field.Name) &&
- field.Kind != FieldKind_DocID {
- return field, true
- }
- }
- return SchemaFieldDescription{}, false
-}
-
-// QuerySources returns all the Sources of type [QuerySource]
-func (col CollectionDescription) QuerySources() []*QuerySource {
- return sourcesOfType[*QuerySource](col)
-}
-
-// CollectionSources returns all the Sources of type [CollectionSource]
-func (col CollectionDescription) CollectionSources() []*CollectionSource {
- return sourcesOfType[*CollectionSource](col)
-}
-
-func sourcesOfType[ResultType any](col CollectionDescription) []ResultType {
- result := []ResultType{}
- for _, source := range col.Sources {
- if typedSource, isOfType := source.(ResultType); isOfType {
- result = append(result, typedSource)
- }
- }
- return result
+ // Policy contains the policy information on this collection.
+ //
+ // It is possible for a collection to not have a policy; a collection
+ // without a policy has no access control.
+ //
+ // Note: The policy information must be validated using acp right after
+ // parsing is done, to avoid storing an invalid policyID or policy resource
+ // that may not even exist on acp.
+ Policy immutable.Option[PolicyDescription]
}
// QuerySource represents a collection data source from a query.
@@ -169,213 +119,56 @@ type CollectionSource struct {
Transform immutable.Option[model.Lens]
}
-// SchemaDescription describes a Schema and its associated metadata.
-type SchemaDescription struct {
- // Root is the version agnostic identifier for this schema.
- //
- // It remains constant throughout the lifetime of this schema.
- Root string
-
- // VersionID is the version-specific identifier for this schema.
- //
- // It is generated on mutation of this schema and can be used to uniquely
- // identify a schema at a specific version.
- VersionID string
-
- // Name is the name of this Schema.
- //
- // It is currently used to define the Collection Name, and as such these two properties
- // will currently share the same name.
- //
- // It is immutable.
- Name string
-
- // Fields contains the fields within this Schema.
- //
- // Currently new fields may be added after initial declaration, but they cannot be removed.
- Fields []SchemaFieldDescription
+// IDString returns the collection ID as a string.
+func (col CollectionDescription) IDString() string {
+ return fmt.Sprint(col.ID)
}
-// FieldKind describes the type of a field.
-type FieldKind uint8
-
-func (f FieldKind) String() string {
- switch f {
- case FieldKind_DocID:
- return "ID"
- case FieldKind_NILLABLE_BOOL:
- return "Boolean"
- case FieldKind_NILLABLE_BOOL_ARRAY:
- return "[Boolean]"
- case FieldKind_BOOL_ARRAY:
- return "[Boolean!]"
- case FieldKind_NILLABLE_INT:
- return "Int"
- case FieldKind_NILLABLE_INT_ARRAY:
- return "[Int]"
- case FieldKind_INT_ARRAY:
- return "[Int!]"
- case FieldKind_NILLABLE_DATETIME:
- return "DateTime"
- case FieldKind_NILLABLE_FLOAT:
- return "Float"
- case FieldKind_NILLABLE_FLOAT_ARRAY:
- return "[Float]"
- case FieldKind_FLOAT_ARRAY:
- return "[Float!]"
- case FieldKind_NILLABLE_STRING:
- return "String"
- case FieldKind_NILLABLE_STRING_ARRAY:
- return "[String]"
- case FieldKind_STRING_ARRAY:
- return "[String!]"
- case FieldKind_NILLABLE_BLOB:
- return "Blob"
- case FieldKind_NILLABLE_JSON:
- return "JSON"
- default:
- return fmt.Sprint(uint8(f))
+// GetFieldByName returns the field for the given field name. If such a field is found it
+// will return it and true, if it is not found it will return false.
+func (col CollectionDescription) GetFieldByName(fieldName string) (CollectionFieldDescription, bool) {
+ for _, field := range col.Fields {
+ if field.Name == fieldName {
+ return field, true
+ }
}
+ return CollectionFieldDescription{}, false
}
-// IsObject returns true if this FieldKind is an object type.
-func (f FieldKind) IsObject() bool {
- return f == FieldKind_FOREIGN_OBJECT ||
- f == FieldKind_FOREIGN_OBJECT_ARRAY
-}
-
-// IsObjectArray returns true if this FieldKind is an object array type.
-func (f FieldKind) IsObjectArray() bool {
- return f == FieldKind_FOREIGN_OBJECT_ARRAY
-}
-
-// IsArray returns true if this FieldKind is an array type which includes inline arrays as well
-// as relation arrays.
-func (f FieldKind) IsArray() bool {
- return f == FieldKind_BOOL_ARRAY ||
- f == FieldKind_INT_ARRAY ||
- f == FieldKind_FLOAT_ARRAY ||
- f == FieldKind_STRING_ARRAY ||
- f == FieldKind_FOREIGN_OBJECT_ARRAY ||
- f == FieldKind_NILLABLE_BOOL_ARRAY ||
- f == FieldKind_NILLABLE_INT_ARRAY ||
- f == FieldKind_NILLABLE_FLOAT_ARRAY ||
- f == FieldKind_NILLABLE_STRING_ARRAY
-}
-
-// Note: These values are serialized and persisted in the database, avoid modifying existing values.
-const (
- FieldKind_None FieldKind = 0
- FieldKind_DocID FieldKind = 1
- FieldKind_NILLABLE_BOOL FieldKind = 2
- FieldKind_BOOL_ARRAY FieldKind = 3
- FieldKind_NILLABLE_INT FieldKind = 4
- FieldKind_INT_ARRAY FieldKind = 5
- FieldKind_NILLABLE_FLOAT FieldKind = 6
- FieldKind_FLOAT_ARRAY FieldKind = 7
- _ FieldKind = 8 // safe to repurpose (was never used)
- _ FieldKind = 9 // safe to repurpose (previously old field)
- FieldKind_NILLABLE_DATETIME FieldKind = 10
- FieldKind_NILLABLE_STRING FieldKind = 11
- FieldKind_STRING_ARRAY FieldKind = 12
- FieldKind_NILLABLE_BLOB FieldKind = 13
- FieldKind_NILLABLE_JSON FieldKind = 14
- _ FieldKind = 15 // safe to repurpose (was never used)
-
- // Embedded object, but accessed via foreign keys
- FieldKind_FOREIGN_OBJECT FieldKind = 16
-
- // Array of embedded objects, accessed via foreign keys
- FieldKind_FOREIGN_OBJECT_ARRAY FieldKind = 17
-
- FieldKind_NILLABLE_BOOL_ARRAY FieldKind = 18
- FieldKind_NILLABLE_INT_ARRAY FieldKind = 19
- FieldKind_NILLABLE_FLOAT_ARRAY FieldKind = 20
- FieldKind_NILLABLE_STRING_ARRAY FieldKind = 21
-)
-
-// FieldKindStringToEnumMapping maps string representations of [FieldKind] values to
-// their enum values.
-//
-// It is currently used to by [db.PatchSchema] to allow string representations of
-// [FieldKind] to be provided instead of their raw int values. This usage may expand
-// in the future. They currently roughly correspond to the GQL field types, but this
-// equality is not guaranteed.
-var FieldKindStringToEnumMapping = map[string]FieldKind{
- "ID": FieldKind_DocID,
- "Boolean": FieldKind_NILLABLE_BOOL,
- "[Boolean]": FieldKind_NILLABLE_BOOL_ARRAY,
- "[Boolean!]": FieldKind_BOOL_ARRAY,
- "Int": FieldKind_NILLABLE_INT,
- "[Int]": FieldKind_NILLABLE_INT_ARRAY,
- "[Int!]": FieldKind_INT_ARRAY,
- "DateTime": FieldKind_NILLABLE_DATETIME,
- "Float": FieldKind_NILLABLE_FLOAT,
- "[Float]": FieldKind_NILLABLE_FLOAT_ARRAY,
- "[Float!]": FieldKind_FLOAT_ARRAY,
- "String": FieldKind_NILLABLE_STRING,
- "[String]": FieldKind_NILLABLE_STRING_ARRAY,
- "[String!]": FieldKind_STRING_ARRAY,
- "Blob": FieldKind_NILLABLE_BLOB,
- "JSON": FieldKind_NILLABLE_JSON,
-}
-
-// RelationType describes the type of relation between two types.
-type RelationType uint8
-
-// FieldID is a unique identifier for a field in a schema.
-type FieldID uint32
-
-func (f FieldID) String() string {
- return fmt.Sprint(uint32(f))
-}
-
-// SchemaFieldDescription describes a field on a Schema and its associated metadata.
-type SchemaFieldDescription struct {
- // Name contains the name of this field.
- //
- // It is currently immutable.
- Name string
-
- // The data type that this field holds.
- //
- // Must contain a valid value. It is currently immutable.
- Kind FieldKind
-
- // Schema contains the schema name of the type this field contains if this field is
- // a relation field. Otherwise this will be empty.
- Schema string
-
- // RelationName the name of the relationship that this field represents if this field is
- // a relation field. Otherwise this will be empty.
- RelationName string
-
- // The CRDT Type of this field. If no type has been provided it will default to [LWW_REGISTER].
- //
- // It is currently immutable.
- Typ CType
-
- // If true, this is the primary half of a relation, otherwise is false.
- IsPrimaryRelation bool
+// GetFieldByRelation returns the field that supports the relation of the given name.
+func (col CollectionDescription) GetFieldByRelation(
+ relationName string,
+ otherCollectionName string,
+ otherFieldName string,
+) (CollectionFieldDescription, bool) {
+ for _, field := range col.Fields {
+ if field.RelationName.Value() == relationName &&
+ !(col.Name.Value() == otherCollectionName && otherFieldName == field.Name) &&
+ field.Kind.Value() != FieldKind_DocID {
+ return field, true
+ }
+ }
+ return CollectionFieldDescription{}, false
}
-// CollectionFieldDescription describes the local components of a field on a collection.
-type CollectionFieldDescription struct {
- // Name contains the name of the [SchemaFieldDescription] that this field uses.
- Name string
-
- // ID contains the local, internal ID of this field.
- ID FieldID
+// QuerySources returns all the Sources of type [QuerySource]
+func (col CollectionDescription) QuerySources() []*QuerySource {
+ return sourcesOfType[*QuerySource](col)
}
-// IsRelation returns true if this field is a relation.
-func (f SchemaFieldDescription) IsRelation() bool {
- return f.RelationName != ""
+// CollectionSources returns all the Sources of type [CollectionSource]
+func (col CollectionDescription) CollectionSources() []*CollectionSource {
+ return sourcesOfType[*CollectionSource](col)
}
-// IsSet returns true if the target relation type is set.
-func (m RelationType) IsSet(target RelationType) bool {
- return m&target > 0
+func sourcesOfType[ResultType any](col CollectionDescription) []ResultType {
+ result := []ResultType{}
+ for _, source := range col.Sources {
+ if typedSource, isOfType := source.(ResultType); isOfType {
+ result = append(result, typedSource)
+ }
+ }
+ return result
}
// collectionDescription is a private type used to facilitate the unmarshalling
@@ -386,6 +179,7 @@ type collectionDescription struct {
ID uint32
RootID uint32
SchemaVersionID string
+ Policy immutable.Option[PolicyDescription]
Indexes []IndexDescription
Fields []CollectionFieldDescription
@@ -407,6 +201,7 @@ func (c *CollectionDescription) UnmarshalJSON(bytes []byte) error {
c.Indexes = descMap.Indexes
c.Fields = descMap.Fields
c.Sources = make([]any, len(descMap.Sources))
+ c.Policy = descMap.Policy
for i, source := range descMap.Sources {
sourceJson, err := json.Marshal(source)
diff --git a/client/collection_field_description.go b/client/collection_field_description.go
new file mode 100644
index 0000000000..98b012d641
--- /dev/null
+++ b/client/collection_field_description.go
@@ -0,0 +1,78 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/sourcenetwork/immutable"
+)
+
+// FieldID is a unique identifier for a field in a schema.
+type FieldID uint32
+
+// CollectionFieldDescription describes the local components of a field on a collection.
+type CollectionFieldDescription struct {
+ // Name contains the name of the [SchemaFieldDescription] that this field uses.
+ Name string
+
+ // ID contains the local, internal ID of this field.
+ ID FieldID
+
+ // Kind contains the local field kind if this is a local-only field (e.g. the secondary
+ // side of a relation).
+ //
+ // If the field is globally defined (on the Schema), this will be [None].
+ Kind immutable.Option[FieldKind]
+
+ // RelationName contains the name of this relation, if this field is part of a relationship.
+ //
+ // Otherwise it will be [None].
+ RelationName immutable.Option[string]
+}
+
+func (f FieldID) String() string {
+ return fmt.Sprint(uint32(f))
+}
+
+// collectionFieldDescription is a private type used to facilitate the unmarshalling
+// of json to a [CollectionFieldDescription].
+type collectionFieldDescription struct {
+ Name string
+ ID FieldID
+ RelationName immutable.Option[string]
+
+ // Properties below this line are unmarshalled using custom logic in [UnmarshalJSON]
+ Kind json.RawMessage
+}
+
+func (f *CollectionFieldDescription) UnmarshalJSON(bytes []byte) error {
+ var descMap collectionFieldDescription
+ err := json.Unmarshal(bytes, &descMap)
+ if err != nil {
+ return err
+ }
+
+ f.Name = descMap.Name
+ f.ID = descMap.ID
+ f.RelationName = descMap.RelationName
+ kind, err := parseFieldKind(descMap.Kind)
+ if err != nil {
+ return err
+ }
+
+ if kind != FieldKind_None {
+ f.Kind = immutable.Some(kind)
+ }
+
+ return nil
+}
diff --git a/client/ctype.go b/client/ctype.go
index c5f792df86..f9d961ec3e 100644
--- a/client/ctype.go
+++ b/client/ctype.go
@@ -23,12 +23,13 @@ const (
OBJECT
COMPOSITE
PN_COUNTER
+ P_COUNTER
)
// IsSupportedFieldCType returns true if the type is supported as a document field type.
func (t CType) IsSupportedFieldCType() bool {
switch t {
- case NONE_CRDT, LWW_REGISTER, PN_COUNTER:
+ case NONE_CRDT, LWW_REGISTER, PN_COUNTER, P_COUNTER:
return true
default:
return false
@@ -38,7 +39,7 @@ func (t CType) IsSupportedFieldCType() bool {
// IsCompatibleWith returns true if the CRDT is compatible with the field kind
func (t CType) IsCompatibleWith(kind FieldKind) bool {
switch t {
- case PN_COUNTER:
+ case PN_COUNTER, P_COUNTER:
if kind == FieldKind_NILLABLE_INT || kind == FieldKind_NILLABLE_FLOAT {
return true
}
@@ -61,6 +62,8 @@ func (t CType) String() string {
return "composite"
case PN_COUNTER:
return "pncounter"
+ case P_COUNTER:
+ return "pcounter"
default:
return "unknown"
}
diff --git a/client/data_definition.md b/client/data_definition.md
new file mode 100644
index 0000000000..c0a197158e
--- /dev/null
+++ b/client/data_definition.md
@@ -0,0 +1,65 @@
+# Data Definition in a DefraDB instance
+
+Data held in a DefraDB instance is organized into [collections](#collections) of documents. [Collections](#collections) are [local](#local-definitions) groupings of documents that share the same [globally](#global-definitions) defined shape declared by a [schema](#schemas).
+
+## Local definitions
+
+Local definitions are specific to the node you are directly working with; they are not shared with, or assumed to be the same on, other nodes in the network.
+
+Splitting local elements out from the global ones allows some local customization of the way data is organized within any given node. It also minimizes the amount of 'stuff' that must be kept consistent across the decentralized network in order to have a well-behaved database.
+
+Local data definitions are always defined on the [collection](#collections).
+
+Examples include indexes, field IDs, and [lens transforms](https://docs.source.network/defradb/guides/schema-migration).
+
+## Global definitions
+
+Global definitions are consistent across all nodes in the decentralized network. This is enforced by the use of things like CIDs for schema versions. If a global definition were to differ across nodes, each variation would be treated as a completely different definition.
+
+Global data definitions are always defined on the [schema](#schemas).
+
+Examples include field names, field kinds, and [CRDTs](https://docs.source.network/defradb/guides/merkle-crdt).
+
+## Collections
+
+Collections represent [local](#local-definitions), independently queryable datasets sharing the same shape.
+
+Collections are defined by the `CollectionDescription` struct. This can be mutated via the `PatchCollection` function.
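+
+As a rough, hypothetical sketch (the patch path layout used here is an assumption for illustration, and per the interface docs only the collection name can currently be modified), renaming a collection via `PatchCollection` might look like:
+
+```go
+import (
+	"context"
+
+	"github.com/sourcenetwork/defradb/client"
+)
+
+// renameCollection is a hypothetical sketch of mutating a
+// CollectionDescription via PatchCollection. The patch path layout
+// ("/<collection id>/Name") is an assumption for illustration only.
+func renameCollection(ctx context.Context, store client.Store) error {
+	patch := `[
+		{ "op": "replace", "path": "/1/Name", "value": "Users" }
+	]`
+	return store.PatchCollection(ctx, patch)
+}
+```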
+
+A collection will always have a [global](#global-definitions) shape defined by a single [schema](#schemas) version.
+
+### Versions
+
+`CollectionDescription` instances may be active or inactive. Inactive `CollectionDescription`s will not have a name, and cannot be queried.
+
+When a new [schema](#schemas) version is created and has a collection defined for it, a new `CollectionDescription` instance will be created and linked to the new schema version. The new `CollectionDescription` instance will share the same root ID as the previous one, and may be active or inactive depending on the arguments specified by the user defining the new schema.
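+
+As a hedged sketch, switching which collection versions are active might look like the following (the `SetActiveSchemaVersion` signature is assumed from its doc comment in `client/db.go`, and the version ID is a placeholder):
+
+```go
+import (
+	"context"
+
+	"github.com/sourcenetwork/defradb/client"
+)
+
+// activateVersion is a hypothetical sketch: it activates all collection
+// versions tied to the given schema version ID, deactivating the others
+// that share the same schema root. The ID below is a placeholder.
+func activateVersion(ctx context.Context, store client.Store) error {
+	return store.SetActiveSchemaVersion(ctx, "bafkrei-example-version-id")
+}
+```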
+
+[Lens migrations](https://docs.source.network/defradb/guides/schema-migration) between collection versions may be defined. These are, like everything on the collection, [local](#local-definitions). They allow data to be transformed between versions, so documents synced across the node network at one schema version can be presented to users at **query time** at another version.
+
+### Collection fields
+
+The set of fields on a `CollectionDescription` defines [local](#local-definitions) aspects of the [globally](#global-definitions) defined fields on the collection's [schema](#schemas). The set may also include local-only fields that are not defined on the schema, and will not be synced to other nodes - currently these are limited to the secondary side of a relationship defined between two collections.
+
+### Views
+
+Collections are not limited to representing writeable data; they can also represent views of written data.
+
+Views are collections with a `QuerySource` source in the `Sources` set. On query they will fetch data from the query defined on `QuerySource`, and then (optionally) apply a [Lens](https://github.com/lens-vm/lens) transform before yielding the results to the user. The query may point to another view, allowing views of views of views.
+
+Views may be defined using the `AddView` function.
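+
+As an illustration of the pieces a view pairs together, the following hypothetical query and SDL define a view over a `Book` collection (all names are invented for the example, and how these strings are passed to `AddView` is not shown in this document):
+
+```go
+// A hypothetical view definition: a query over existing collections plus
+// an SDL describing the shape of the view's results.
+const bookViewQuery = `
+	Book {
+		name
+		author {
+			name
+		}
+	}
+`
+
+const bookViewSDL = `
+	type BookView {
+		name: String
+		author: AuthorView
+	}
+	type AuthorView {
+		name: String
+	}
+`
+```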
+
+### Embedded types
+
+Some fields on a collection may represent a complex object. Typically these will be a relationship to another collection; however, they may instead represent an embedded type.
+
+Embedded types cannot exist or be queried outside of the context of their host collection, and thus are defined only as a [global](#global-definitions) shape represented by a [schema](#schemas).
+
+Related objects defined in a [view](#views) are embedded objects.
+
+## Schemas
+
+Schemas represent [global](#global-definitions) data shapes. They cannot host document data themselves or be queried; that is done via [collections](#collections).
+
+Schemas are defined by the `SchemaDescription` struct. They are immutable; however, new versions can be created using the `PatchSchema` function.
+
+Multiple [collections](#collections) may reference the same schema.
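+
+As a hedged example, creating a new schema version by adding a field through `PatchSchema` (whose signature appears in `client/db.go`) might look like this; the patch path convention (`/<schema name>/Fields/-`) is an assumption for illustration:
+
+```go
+import (
+	"context"
+
+	"github.com/lens-vm/lens/host-go/config/model"
+	"github.com/sourcenetwork/defradb/client"
+	"github.com/sourcenetwork/immutable"
+)
+
+// addPointsField is a hypothetical sketch of creating a new schema version.
+// It appends a field to the "Users" schema, supplies no lens migration,
+// and asks for the new version to be made active.
+func addPointsField(ctx context.Context, store client.Store) error {
+	patch := `[
+		{ "op": "add", "path": "/Users/Fields/-", "value": {"Name": "points", "Kind": "Int"} }
+	]`
+	return store.PatchSchema(ctx, patch, immutable.None[model.Lens](), true)
+}
+```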
diff --git a/client/db.go b/client/db.go
index 7b0cc8060f..c5cb95eb4b 100644
--- a/client/db.go
+++ b/client/db.go
@@ -42,9 +42,6 @@ type DB interface {
// can safely operate on it concurrently.
NewConcurrentTxn(context.Context, bool) (datastore.Txn, error)
- // WithTxn returns a new [client.Store] that respects the given transaction.
- WithTxn(datastore.Txn) Store
-
// Root returns the underlying root store, within which all data managed by DefraDB is held.
Root() datastore.RootStore
@@ -85,6 +82,18 @@ type DB interface {
//
// It is likely unwise to call this on a large database instance.
PrintDump(ctx context.Context) error
+
+ // AddPolicy adds a policy to acp, if acp is available.
+ //
+ // If the policy is successfully added to acp then a policyID is returned,
+ // otherwise if acp is not available the following error is returned:
+ // [client.ErrPolicyAddFailureNoACP]
+ //
+ // Detects the format of the policy automatically by assuming YAML format if JSON
+ // validation fails.
+ //
+ // Note: A policy cannot be added without the creatorID (identity).
+ AddPolicy(ctx context.Context, policy string) (AddPolicyResult, error)
}
// Store contains the core DefraDB read-write operations.
@@ -120,6 +129,17 @@ type Store interface {
// A lens configuration may also be provided, it will be added to all collections using the schema.
PatchSchema(context.Context, string, immutable.Option[model.Lens], bool) error
+ // PatchCollection takes the given JSON patch string and applies it to the set of CollectionDescriptions
+ // present in the database.
+ //
+ // It will also update the GQL types used by the query system. It will error and not apply any of the
+ // requested updates should the net result of the patch be an invalid state. The individual operations
+ // defined in the patch do not need to result in a valid state, only the net result of the full patch does.
+ //
+ // Currently only the collection name can be modified.
+ PatchCollection(context.Context, string) error
+
// SetActiveSchemaVersion activates all collection versions with the given schema version, and deactivates all
// those without it (if they share the same schema root).
//
@@ -216,7 +236,7 @@ type Store interface {
GetAllIndexes(context.Context) (map[CollectionName][]IndexDescription, error)
// ExecRequest executes the given GQL request against the [Store].
- ExecRequest(context.Context, string) *RequestResult
+ ExecRequest(ctx context.Context, request string) *RequestResult
}
// GQLResult represents the immediate results of a GQL request.
diff --git a/client/definitions.go b/client/definitions.go
index e521a69fcf..c04159f679 100644
--- a/client/definitions.go
+++ b/client/definitions.go
@@ -25,16 +25,28 @@ type CollectionDefinition struct {
// GetFieldByName returns the field for the given field name. If such a field is found it
// will return it and true, if it is not found it will return false.
func (def CollectionDefinition) GetFieldByName(fieldName string) (FieldDefinition, bool) {
- collectionField, ok := def.Description.GetFieldByName(fieldName)
- if ok {
- schemaField, ok := def.Schema.GetFieldByName(fieldName)
- if ok {
- return NewFieldDefinition(
- collectionField,
- schemaField,
- ), true
- }
+ collectionField, existsOnCollection := def.Description.GetFieldByName(fieldName)
+ schemaField, existsOnSchema := def.Schema.GetFieldByName(fieldName)
+
+ if existsOnCollection && existsOnSchema {
+ return NewFieldDefinition(
+ collectionField,
+ schemaField,
+ ), true
+ } else if existsOnCollection && !existsOnSchema {
+ // If the field exists only on the collection, it is a local only field, for example the
+ // secondary side of a relation.
+ return NewLocalFieldDefinition(
+ collectionField,
+ ), true
+ } else if !existsOnCollection && existsOnSchema {
+ // If the field only exist on the schema it is likely that this is a schema-only object
+ // definition, for example for an embedded object.
+ return NewSchemaOnlyFieldDefinition(
+ schemaField,
+ ), true
}
+
return FieldDefinition{}, false
}
@@ -42,6 +54,8 @@ func (def CollectionDefinition) GetFieldByName(fieldName string) (FieldDefinitio
// as a single set.
func (def CollectionDefinition) GetFields() []FieldDefinition {
fields := []FieldDefinition{}
+ localFieldNames := map[string]struct{}{}
+
for _, localField := range def.Description.Fields {
globalField, ok := def.Schema.GetFieldByName(localField.Name)
if ok {
@@ -49,11 +63,41 @@ func (def CollectionDefinition) GetFields() []FieldDefinition {
fields,
NewFieldDefinition(localField, globalField),
)
+ } else {
+ // This must be a local only field, for example the secondary side of a relation.
+ fields = append(
+ fields,
+ NewLocalFieldDefinition(localField),
+ )
+ }
+ localFieldNames[localField.Name] = struct{}{}
+ }
+
+ for _, schemaField := range def.Schema.Fields {
+ if _, ok := localFieldNames[schemaField.Name]; ok {
+ continue
}
+ // This must be a global only field, for example on an embedded object.
+ fields = append(
+ fields,
+ NewSchemaOnlyFieldDefinition(schemaField),
+ )
}
+
return fields
}
+// GetName gets the name of this definition.
+//
+// If the collection description has a name (e.g. it is an active collection) it will return that,
+// otherwise it will return the schema name.
+func (def CollectionDefinition) GetName() string {
+ if def.Description.Name.HasValue() {
+ return def.Description.Name.Value()
+ }
+ return def.Schema.Name
+}
+
// FieldDefinition describes the combined local and global set of properties that constitutes
// a field on a collection.
//
@@ -78,10 +122,6 @@ type FieldDefinition struct {
// Must contain a valid value. It is currently immutable.
Kind FieldKind
- // Schema contains the schema name of the type this field contains if this field is
- // a relation field. Otherwise this will be empty.
- Schema string
-
// RelationName the name of the relationship that this field represents if this field is
// a relation field. Otherwise this will be empty.
RelationName string
@@ -98,14 +138,39 @@ type FieldDefinition struct {
// NewFieldDefinition returns a new [FieldDefinition], combining the given local and global elements
// into a single object.
func NewFieldDefinition(local CollectionFieldDescription, global SchemaFieldDescription) FieldDefinition {
+ var kind FieldKind
+ if local.Kind.HasValue() {
+ kind = local.Kind.Value()
+ } else {
+ kind = global.Kind
+ }
+
return FieldDefinition{
Name: global.Name,
ID: local.ID,
- Kind: global.Kind,
- Schema: global.Schema,
- RelationName: global.RelationName,
+ Kind: kind,
+ RelationName: local.RelationName.Value(),
Typ: global.Typ,
- IsPrimaryRelation: global.IsPrimaryRelation,
+ IsPrimaryRelation: kind.IsObject() && !kind.IsArray(),
+ }
+}
+
+// NewLocalFieldDefinition returns a new [FieldDefinition] from the given local [CollectionFieldDescription].
+func NewLocalFieldDefinition(local CollectionFieldDescription) FieldDefinition {
+ return FieldDefinition{
+ Name: local.Name,
+ ID: local.ID,
+ Kind: local.Kind.Value(),
+ RelationName: local.RelationName.Value(),
+ }
+}
+
+// NewSchemaOnlyFieldDefinition returns a new [FieldDefinition] from the given global [SchemaFieldDescription].
+func NewSchemaOnlyFieldDefinition(global SchemaFieldDescription) FieldDefinition {
+ return FieldDefinition{
+ Name: global.Name,
+ Kind: global.Kind,
+ Typ: global.Typ,
}
}
diff --git a/client/document.go b/client/document.go
index 6c837260ba..4534e9fa33 100644
--- a/client/document.go
+++ b/client/document.go
@@ -66,28 +66,28 @@ type Document struct {
// marks if document has unsaved changes
isDirty bool
- schemaDescription SchemaDescription
+ collectionDefinition CollectionDefinition
}
-func newEmptyDoc(sd SchemaDescription) *Document {
+func newEmptyDoc(collectionDefinition CollectionDefinition) *Document {
return &Document{
- fields: make(map[string]Field),
- values: make(map[Field]*FieldValue),
- schemaDescription: sd,
+ fields: make(map[string]Field),
+ values: make(map[Field]*FieldValue),
+ collectionDefinition: collectionDefinition,
}
}
// NewDocWithID creates a new Document with a specified key.
-func NewDocWithID(docID DocID, sd SchemaDescription) *Document {
- doc := newEmptyDoc(sd)
+func NewDocWithID(docID DocID, collectionDefinition CollectionDefinition) *Document {
+ doc := newEmptyDoc(collectionDefinition)
doc.id = docID
return doc
}
// NewDocFromMap creates a new Document from a data map.
-func NewDocFromMap(data map[string]any, sd SchemaDescription) (*Document, error) {
+func NewDocFromMap(data map[string]any, collectionDefinition CollectionDefinition) (*Document, error) {
var err error
- doc := newEmptyDoc(sd)
+ doc := newEmptyDoc(collectionDefinition)
// check if document contains special _docID field
k, hasDocID := data[request.DocIDFieldName]
@@ -126,8 +126,8 @@ func IsJSONArray(obj []byte) bool {
}
// NewFromJSON creates a new instance of a Document from a raw JSON object byte array.
-func NewDocFromJSON(obj []byte, sd SchemaDescription) (*Document, error) {
- doc := newEmptyDoc(sd)
+func NewDocFromJSON(obj []byte, collectionDefinition CollectionDefinition) (*Document, error) {
+ doc := newEmptyDoc(collectionDefinition)
err := doc.SetWithJSON(obj)
if err != nil {
return nil, err
@@ -141,7 +141,7 @@ func NewDocFromJSON(obj []byte, sd SchemaDescription) (*Document, error) {
// ManyFromJSON creates a new slice of Documents from a raw JSON array byte array.
// It will return an error if the given byte array is not a valid JSON array.
-func NewDocsFromJSON(obj []byte, sd SchemaDescription) ([]*Document, error) {
+func NewDocsFromJSON(obj []byte, collectionDefinition CollectionDefinition) ([]*Document, error) {
v, err := fastjson.ParseBytes(obj)
if err != nil {
return nil, err
@@ -157,7 +157,7 @@ func NewDocsFromJSON(obj []byte, sd SchemaDescription) ([]*Document, error) {
if err != nil {
return nil, err
}
- doc := newEmptyDoc(sd)
+ doc := newEmptyDoc(collectionDefinition)
err = doc.setWithFastJSONObject(o)
if err != nil {
return nil, err
@@ -172,80 +172,130 @@ func NewDocsFromJSON(obj []byte, sd SchemaDescription) ([]*Document, error) {
return docs, nil
}
-// IsNillableKind returns true if the given FieldKind is nillable.
-func IsNillableKind(kind FieldKind) bool {
- switch kind {
- case FieldKind_NILLABLE_STRING, FieldKind_NILLABLE_BLOB, FieldKind_NILLABLE_JSON,
- FieldKind_NILLABLE_BOOL, FieldKind_NILLABLE_FLOAT, FieldKind_NILLABLE_DATETIME,
- FieldKind_NILLABLE_INT:
- return true
- default:
- return false
- }
-}
-
// validateFieldSchema takes a given value as an interface,
// and ensures it matches the supplied field description.
// It will do any minor parsing, like dates, and return
// the typed value again as an interface.
-func validateFieldSchema(val any, field SchemaFieldDescription) (any, error) {
- if IsNillableKind(field.Kind) {
+func validateFieldSchema(val any, field FieldDefinition) (NormalValue, error) {
+ if field.Kind.IsNillable() {
if val == nil {
- return nil, nil
+ return NewNormalNil(field.Kind)
}
if v, ok := val.(*fastjson.Value); ok && v.Type() == fastjson.TypeNull {
- return nil, nil
+ return NewNormalNil(field.Kind)
}
}
+ if field.Kind.IsObjectArray() {
+ return nil, NewErrFieldNotExist(field.Name)
+ }
+
+ if field.Kind.IsObject() {
+ v, err := getString(val)
+ if err != nil {
+ return nil, err
+ }
+ return NewNormalString(v), nil
+ }
+
switch field.Kind {
case FieldKind_DocID, FieldKind_NILLABLE_STRING, FieldKind_NILLABLE_BLOB:
- return getString(val)
+ v, err := getString(val)
+ if err != nil {
+ return nil, err
+ }
+ return NewNormalString(v), nil
case FieldKind_STRING_ARRAY:
- return getArray(val, getString)
+ v, err := getArray(val, getString)
+ if err != nil {
+ return nil, err
+ }
+ return NewNormalStringArray(v), nil
case FieldKind_NILLABLE_STRING_ARRAY:
- return getNillableArray(val, getString)
+ v, err := getNillableArray(val, getString)
+ if err != nil {
+ return nil, err
+ }
+ return NewNormalNillableStringArray(v), nil
case FieldKind_NILLABLE_BOOL:
- return getBool(val)
+ v, err := getBool(val)
+ if err != nil {
+ return nil, err
+ }
+ return NewNormalBool(v), nil
case FieldKind_BOOL_ARRAY:
- return getArray(val, getBool)
+ v, err := getArray(val, getBool)
+ if err != nil {
+ return nil, err
+ }
+ return NewNormalBoolArray(v), nil
case FieldKind_NILLABLE_BOOL_ARRAY:
- return getNillableArray(val, getBool)
+ v, err := getNillableArray(val, getBool)
+ if err != nil {
+ return nil, err
+ }
+ return NewNormalNillableBoolArray(v), nil
case FieldKind_NILLABLE_FLOAT:
- return getFloat64(val)
+ v, err := getFloat64(val)
+ if err != nil {
+ return nil, err
+ }
+ return NewNormalFloat(v), nil
case FieldKind_FLOAT_ARRAY:
- return getArray(val, getFloat64)
+ v, err := getArray(val, getFloat64)
+ if err != nil {
+ return nil, err
+ }
+ return NewNormalFloatArray(v), nil
case FieldKind_NILLABLE_FLOAT_ARRAY:
- return getNillableArray(val, getFloat64)
+ v, err := getNillableArray(val, getFloat64)
+ if err != nil {
+ return nil, err
+ }
+ return NewNormalNillableFloatArray(v), nil
case FieldKind_NILLABLE_DATETIME:
- return getDateTime(val)
+ v, err := getDateTime(val)
+ if err != nil {
+ return nil, err
+ }
+ return NewNormalTime(v), nil
case FieldKind_NILLABLE_INT:
- return getInt64(val)
+ v, err := getInt64(val)
+ if err != nil {
+ return nil, err
+ }
+ return NewNormalInt(v), nil
case FieldKind_INT_ARRAY:
- return getArray(val, getInt64)
+ v, err := getArray(val, getInt64)
+ if err != nil {
+ return nil, err
+ }
+ return NewNormalIntArray(v), nil
case FieldKind_NILLABLE_INT_ARRAY:
- return getNillableArray(val, getInt64)
-
- case FieldKind_FOREIGN_OBJECT:
- return getString(val)
-
- case FieldKind_FOREIGN_OBJECT_ARRAY:
- return nil, NewErrFieldOrAliasToFieldNotExist(field.Name)
+ v, err := getNillableArray(val, getInt64)
+ if err != nil {
+ return nil, err
+ }
+ return NewNormalNillableIntArray(v), nil
case FieldKind_NILLABLE_JSON:
- return getJSON(val)
+ v, err := getJSON(val)
+ if err != nil {
+ return nil, err
+ }
+ return NewNormalString(v), nil
}
return nil, NewErrUnhandledType("FieldKind", field.Kind)
@@ -538,15 +588,15 @@ func (doc *Document) setWithFastJSONObject(obj *fastjson.Object) error {
// Set the value of a field.
func (doc *Document) Set(field string, value any) error {
- fd, exists := doc.schemaDescription.GetFieldByName(field)
+ fd, exists := doc.collectionDefinition.GetFieldByName(field)
if !exists {
return NewErrFieldNotExist(field)
}
- if fd.IsRelation() && !fd.Kind.IsObjectArray() {
+ if fd.Kind.IsObject() && !fd.Kind.IsObjectArray() {
if !strings.HasSuffix(field, request.RelatedObjectID) {
field = field + request.RelatedObjectID
}
- fd, exists = doc.schemaDescription.GetFieldByName(field)
+ fd, exists = doc.collectionDefinition.GetFieldByName(field)
if !exists {
return NewErrFieldNotExist(field)
}
@@ -573,16 +623,13 @@ func (doc *Document) set(t CType, field string, value *FieldValue) error {
return nil
}
-func (doc *Document) setCBOR(t CType, field string, val any) error {
+func (doc *Document) setCBOR(t CType, field string, val NormalValue) error {
value := NewFieldValue(t, val)
return doc.set(t, field, value)
}
func (doc *Document) setAndParseObjectType(value map[string]any) error {
for k, v := range value {
- if v == nil {
- continue
- }
err := doc.Set(k, v)
if err != nil {
return err
diff --git a/client/document_test.go b/client/document_test.go
index 593876705f..a70e868e0e 100644
--- a/client/document_test.go
+++ b/client/document_test.go
@@ -16,6 +16,8 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "github.com/sourcenetwork/immutable"
+
ccid "github.com/sourcenetwork/defradb/core/cid"
)
@@ -27,8 +29,22 @@ var (
pref = ccid.NewDefaultSHA256PrefixV1()
- schemaDescriptions = []SchemaDescription{
- {
+ def = CollectionDefinition{
+ Description: CollectionDescription{
+ Name: immutable.Some("User"),
+ Fields: []CollectionFieldDescription{
+ {
+ Name: "Name",
+ },
+ {
+ Name: "Age",
+ },
+ {
+ Name: "Custom",
+ },
+ },
+ },
+ Schema: SchemaDescription{
Name: "User",
Fields: []SchemaFieldDescription{
{
@@ -52,7 +68,7 @@ var (
)
func TestNewFromJSON(t *testing.T) {
- doc, err := NewDocFromJSON(testJSONObj, schemaDescriptions[0])
+ doc, err := NewDocFromJSON(testJSONObj, def)
if err != nil {
t.Error("Error creating new doc from JSON:", err)
return
@@ -90,7 +106,7 @@ func TestNewFromJSON(t *testing.T) {
}
func TestSetWithJSON(t *testing.T) {
- doc, err := NewDocFromJSON(testJSONObj, schemaDescriptions[0])
+ doc, err := NewDocFromJSON(testJSONObj, def)
if err != nil {
t.Error("Error creating new doc from JSON:", err)
return
@@ -137,7 +153,7 @@ func TestSetWithJSON(t *testing.T) {
}
func TestNewDocsFromJSON_WithObjectInsteadOfArray_Error(t *testing.T) {
- _, err := NewDocsFromJSON(testJSONObj, schemaDescriptions[0])
+ _, err := NewDocsFromJSON(testJSONObj, def)
require.ErrorContains(t, err, "value doesn't contain array; it contains object")
}
@@ -147,7 +163,7 @@ func TestNewFromJSON_WithValidJSONFieldValue_NoError(t *testing.T) {
"Age": 26,
"Custom": "{\"tree\":\"maple\", \"age\": 260}"
}`)
- doc, err := NewDocFromJSON(objWithJSONField, schemaDescriptions[0])
+ doc, err := NewDocFromJSON(objWithJSONField, def)
if err != nil {
t.Error("Error creating new doc from JSON:", err)
return
@@ -177,7 +193,7 @@ func TestNewFromJSON_WithInvalidJSONFieldValue_Error(t *testing.T) {
"Age": 26,
"Custom": "{\"tree\":\"maple, \"age\": 260}"
}`)
- _, err := NewDocFromJSON(objWithJSONField, schemaDescriptions[0])
+ _, err := NewDocFromJSON(objWithJSONField, def)
require.ErrorContains(t, err, "invalid JSON payload. Payload: {\"tree\":\"maple, \"age\": 260}")
}
@@ -187,6 +203,6 @@ func TestNewFromJSON_WithInvalidJSONFieldValueSimpleString_Error(t *testing.T) {
"Age": 26,
"Custom": "blah"
}`)
- _, err := NewDocFromJSON(objWithJSONField, schemaDescriptions[0])
+ _, err := NewDocFromJSON(objWithJSONField, def)
require.ErrorContains(t, err, "invalid JSON payload. Payload: blah")
}
diff --git a/client/errors.go b/client/errors.go
index c86ac274c7..460392a030 100644
--- a/client/errors.go
+++ b/client/errors.go
@@ -22,15 +22,16 @@ const (
errParsingFailed string = "failed to parse argument"
errUninitializeProperty string = "invalid state, required property is uninitialized"
errMaxTxnRetries string = "reached maximum transaction reties"
- errRelationOneSided string = "relation must be defined on both schemas"
errCollectionNotFound string = "collection not found"
- errFieldOrAliasToFieldNotExist string = "The given field or alias to field does not exist"
errUnknownCRDT string = "unknown crdt"
errCRDTKindMismatch string = "CRDT type %s can't be assigned to field kind %s"
errInvalidCRDTType string = "CRDT type not supported"
errFailedToUnmarshalCollection string = "failed to unmarshal collection json"
errOperationNotPermittedOnNamelessCols string = "operation not permitted on nameless collection"
errInvalidJSONPayload string = "invalid JSON payload"
+ errCanNotNormalizeValue string = "can not normalize value"
+ errCanNotTurnNormalValueIntoArray string = "can not turn normal value into array"
+ errCanNotMakeNormalNilFromFieldKind string = "can not make normal nil from field kind"
)
// Errors returnable from this package.
@@ -44,13 +45,17 @@ var (
ErrOperationNotPermittedOnNamelessCols = errors.New(errOperationNotPermittedOnNamelessCols)
ErrFieldNotObject = errors.New("trying to access field on a non object type")
ErrValueTypeMismatch = errors.New("value does not match indicated type")
- ErrDocumentNotFound = errors.New("no document for the given ID exists")
+ ErrDocumentNotFoundOrNotAuthorized = errors.New("document not found or not authorized to access")
+ ErrPolicyAddFailureNoACP = errors.New("failure adding policy because ACP was not available")
ErrInvalidUpdateTarget = errors.New("the target document to update is of invalid type")
ErrInvalidUpdater = errors.New("the updater of a document is of invalid type")
ErrInvalidDeleteTarget = errors.New("the target document to delete is of invalid type")
ErrMalformedDocID = errors.New("malformed document ID, missing either version or cid")
ErrInvalidDocIDVersion = errors.New("invalid document ID version")
ErrInvalidJSONPayload = errors.New(errInvalidJSONPayload)
+ ErrCanNotNormalizeValue = errors.New(errCanNotNormalizeValue)
+ ErrCanNotTurnNormalValueIntoArray = errors.New(errCanNotTurnNormalValueIntoArray)
+ ErrCanNotMakeNormalNilFromFieldKind = errors.New(errCanNotMakeNormalNilFromFieldKind)
)
// NewErrFieldNotExist returns an error indicating that the given field does not exist.
@@ -75,6 +80,23 @@ func NewErrUnexpectedType[TExpected any](property string, actual any) error {
)
}
+// NewCanNotNormalizeValue returns an error indicating that the given value can not be normalized.
+func NewCanNotNormalizeValue(val any) error {
+ return errors.New(errCanNotNormalizeValue, errors.NewKV("Value", val))
+}
+
+// NewCanNotTurnNormalValueIntoArray returns an error indicating that the given value can not be
+// turned into an array.
+func NewCanNotTurnNormalValueIntoArray(val any) error {
+ return errors.New(errCanNotTurnNormalValueIntoArray, errors.NewKV("Value", val))
+}
+
+// NewCanNotMakeNormalNilFromFieldKind returns an error indicating that a normal nil value can not be
+// created from the given field kind.
+func NewCanNotMakeNormalNilFromFieldKind(kind FieldKind) error {
+ return errors.New(errCanNotMakeNormalNilFromFieldKind, errors.NewKV("Kind", kind))
+}
+
// NewErrUnhandledType returns an error indicating that the given value is of
// a type that is not handled.
func NewErrUnhandledType(property string, actual any) error {
@@ -106,14 +128,6 @@ func NewErrMaxTxnRetries(inner error) error {
return errors.Wrap(errMaxTxnRetries, inner)
}
-func NewErrRelationOneSided(fieldName string, typeName string) error {
- return errors.New(
- errRelationOneSided,
- errors.NewKV("Field", fieldName),
- errors.NewKV("Type", typeName),
- )
-}
-
func NewErrCollectionNotFoundForSchemaVersion(schemaVersionID string) error {
return errors.New(
errCollectionNotFound,
@@ -135,11 +149,6 @@ func NewErrUnknownCRDT(cType CType) error {
)
}
-// NewErrFieldOrAliasToFieldNotExist returns an error indicating that the given field or an alias field does not exist.
-func NewErrFieldOrAliasToFieldNotExist(name string) error {
- return errors.New(errFieldOrAliasToFieldNotExist, errors.NewKV("Name", name))
-}
-
func NewErrInvalidCRDTType(name, crdtType string) error {
return errors.New(
errInvalidCRDTType,
diff --git a/client/lens.go b/client/lens.go
index 1a6b423991..3f5befc604 100644
--- a/client/lens.go
+++ b/client/lens.go
@@ -15,8 +15,6 @@ import (
"github.com/lens-vm/lens/host-go/config/model"
"github.com/sourcenetwork/immutable/enumerable"
-
- "github.com/sourcenetwork/defradb/datastore"
)
// LensConfig represents the configuration of a Lens migration in Defra.
@@ -43,12 +41,6 @@ type LensConfig struct {
// LensRegistry exposes several useful thread-safe migration related functions which may
// be used to manage migrations.
type LensRegistry interface {
- // WithTxn returns a new LensRegistry scoped to the given transaction.
- //
- // WARNING: Currently this does not provide snapshot isolation, if other transactions are committed
- // after this has been created, the results of those commits will be visible within this scope.
- WithTxn(datastore.Txn) LensRegistry
-
// SetMigration caches the migration for the given collection ID. It does not persist the migration in long
// term storage, for that one should call [Store.SetMigration(ctx, cfg)].
//
diff --git a/client/mocks/collection.go b/client/mocks/collection.go
index 6e6c7afae3..7c227edd2b 100644
--- a/client/mocks/collection.go
+++ b/client/mocks/collection.go
@@ -7,8 +7,6 @@ import (
client "github.com/sourcenetwork/defradb/client"
- datastore "github.com/sourcenetwork/defradb/datastore"
-
immutable "github.com/sourcenetwork/immutable"
mock "github.com/stretchr/testify/mock"
@@ -27,13 +25,13 @@ func (_m *Collection) EXPECT() *Collection_Expecter {
return &Collection_Expecter{mock: &_m.Mock}
}
-// Create provides a mock function with given fields: _a0, _a1
-func (_m *Collection) Create(_a0 context.Context, _a1 *client.Document) error {
- ret := _m.Called(_a0, _a1)
+// Create provides a mock function with given fields: ctx, doc
+func (_m *Collection) Create(ctx context.Context, doc *client.Document) error {
+ ret := _m.Called(ctx, doc)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, *client.Document) error); ok {
- r0 = rf(_a0, _a1)
+ r0 = rf(ctx, doc)
} else {
r0 = ret.Error(0)
}
@@ -47,13 +45,13 @@ type Collection_Create_Call struct {
}
// Create is a helper method to define mock.On call
-// - _a0 context.Context
-// - _a1 *client.Document
-func (_e *Collection_Expecter) Create(_a0 interface{}, _a1 interface{}) *Collection_Create_Call {
- return &Collection_Create_Call{Call: _e.mock.On("Create", _a0, _a1)}
+// - ctx context.Context
+// - doc *client.Document
+func (_e *Collection_Expecter) Create(ctx interface{}, doc interface{}) *Collection_Create_Call {
+ return &Collection_Create_Call{Call: _e.mock.On("Create", ctx, doc)}
}
-func (_c *Collection_Create_Call) Run(run func(_a0 context.Context, _a1 *client.Document)) *Collection_Create_Call {
+func (_c *Collection_Create_Call) Run(run func(ctx context.Context, doc *client.Document)) *Collection_Create_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(context.Context), args[1].(*client.Document))
})
@@ -70,6 +68,49 @@ func (_c *Collection_Create_Call) RunAndReturn(run func(context.Context, *client
return _c
}
+// CreateDocIndex provides a mock function with given fields: _a0, _a1
+func (_m *Collection) CreateDocIndex(_a0 context.Context, _a1 *client.Document) error {
+ ret := _m.Called(_a0, _a1)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, *client.Document) error); ok {
+ r0 = rf(_a0, _a1)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// Collection_CreateDocIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateDocIndex'
+type Collection_CreateDocIndex_Call struct {
+ *mock.Call
+}
+
+// CreateDocIndex is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 *client.Document
+func (_e *Collection_Expecter) CreateDocIndex(_a0 interface{}, _a1 interface{}) *Collection_CreateDocIndex_Call {
+ return &Collection_CreateDocIndex_Call{Call: _e.mock.On("CreateDocIndex", _a0, _a1)}
+}
+
+func (_c *Collection_CreateDocIndex_Call) Run(run func(_a0 context.Context, _a1 *client.Document)) *Collection_CreateDocIndex_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(*client.Document))
+ })
+ return _c
+}
+
+func (_c *Collection_CreateDocIndex_Call) Return(_a0 error) *Collection_CreateDocIndex_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *Collection_CreateDocIndex_Call) RunAndReturn(run func(context.Context, *client.Document) error) *Collection_CreateDocIndex_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// CreateIndex provides a mock function with given fields: _a0, _a1
func (_m *Collection) CreateIndex(_a0 context.Context, _a1 client.IndexDescription) (client.IndexDescription, error) {
ret := _m.Called(_a0, _a1)
@@ -123,13 +164,13 @@ func (_c *Collection_CreateIndex_Call) RunAndReturn(run func(context.Context, cl
return _c
}
-// CreateMany provides a mock function with given fields: _a0, _a1
-func (_m *Collection) CreateMany(_a0 context.Context, _a1 []*client.Document) error {
- ret := _m.Called(_a0, _a1)
+// CreateMany provides a mock function with given fields: ctx, docs
+func (_m *Collection) CreateMany(ctx context.Context, docs []*client.Document) error {
+ ret := _m.Called(ctx, docs)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, []*client.Document) error); ok {
- r0 = rf(_a0, _a1)
+ r0 = rf(ctx, docs)
} else {
r0 = ret.Error(0)
}
@@ -143,13 +184,13 @@ type Collection_CreateMany_Call struct {
}
// CreateMany is a helper method to define mock.On call
-// - _a0 context.Context
-// - _a1 []*client.Document
-func (_e *Collection_Expecter) CreateMany(_a0 interface{}, _a1 interface{}) *Collection_CreateMany_Call {
- return &Collection_CreateMany_Call{Call: _e.mock.On("CreateMany", _a0, _a1)}
+// - ctx context.Context
+// - docs []*client.Document
+func (_e *Collection_Expecter) CreateMany(ctx interface{}, docs interface{}) *Collection_CreateMany_Call {
+ return &Collection_CreateMany_Call{Call: _e.mock.On("CreateMany", ctx, docs)}
}
-func (_c *Collection_CreateMany_Call) Run(run func(_a0 context.Context, _a1 []*client.Document)) *Collection_CreateMany_Call {
+func (_c *Collection_CreateMany_Call) Run(run func(ctx context.Context, docs []*client.Document)) *Collection_CreateMany_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(context.Context), args[1].([]*client.Document))
})
@@ -207,23 +248,23 @@ func (_c *Collection_Definition_Call) RunAndReturn(run func() client.CollectionD
return _c
}
-// Delete provides a mock function with given fields: _a0, _a1
-func (_m *Collection) Delete(_a0 context.Context, _a1 client.DocID) (bool, error) {
- ret := _m.Called(_a0, _a1)
+// Delete provides a mock function with given fields: ctx, docID
+func (_m *Collection) Delete(ctx context.Context, docID client.DocID) (bool, error) {
+ ret := _m.Called(ctx, docID)
var r0 bool
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, client.DocID) (bool, error)); ok {
- return rf(_a0, _a1)
+ return rf(ctx, docID)
}
if rf, ok := ret.Get(0).(func(context.Context, client.DocID) bool); ok {
- r0 = rf(_a0, _a1)
+ r0 = rf(ctx, docID)
} else {
r0 = ret.Get(0).(bool)
}
if rf, ok := ret.Get(1).(func(context.Context, client.DocID) error); ok {
- r1 = rf(_a0, _a1)
+ r1 = rf(ctx, docID)
} else {
r1 = ret.Error(1)
}
@@ -237,13 +278,13 @@ type Collection_Delete_Call struct {
}
// Delete is a helper method to define mock.On call
-// - _a0 context.Context
-// - _a1 client.DocID
-func (_e *Collection_Expecter) Delete(_a0 interface{}, _a1 interface{}) *Collection_Delete_Call {
- return &Collection_Delete_Call{Call: _e.mock.On("Delete", _a0, _a1)}
+// - ctx context.Context
+// - docID client.DocID
+func (_e *Collection_Expecter) Delete(ctx interface{}, docID interface{}) *Collection_Delete_Call {
+ return &Collection_Delete_Call{Call: _e.mock.On("Delete", ctx, docID)}
}
-func (_c *Collection_Delete_Call) Run(run func(_a0 context.Context, _a1 client.DocID)) *Collection_Delete_Call {
+func (_c *Collection_Delete_Call) Run(run func(ctx context.Context, docID client.DocID)) *Collection_Delete_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(context.Context), args[1].(client.DocID))
})
@@ -260,167 +301,45 @@ func (_c *Collection_Delete_Call) RunAndReturn(run func(context.Context, client.
return _c
}
-// DeleteWith provides a mock function with given fields: ctx, target
-func (_m *Collection) DeleteWith(ctx context.Context, target interface{}) (*client.DeleteResult, error) {
- ret := _m.Called(ctx, target)
-
- var r0 *client.DeleteResult
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, interface{}) (*client.DeleteResult, error)); ok {
- return rf(ctx, target)
- }
- if rf, ok := ret.Get(0).(func(context.Context, interface{}) *client.DeleteResult); ok {
- r0 = rf(ctx, target)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).(*client.DeleteResult)
- }
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, interface{}) error); ok {
- r1 = rf(ctx, target)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
-}
-
-// Collection_DeleteWith_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteWith'
-type Collection_DeleteWith_Call struct {
- *mock.Call
-}
-
-// DeleteWith is a helper method to define mock.On call
-// - ctx context.Context
-// - target interface{}
-func (_e *Collection_Expecter) DeleteWith(ctx interface{}, target interface{}) *Collection_DeleteWith_Call {
- return &Collection_DeleteWith_Call{Call: _e.mock.On("DeleteWith", ctx, target)}
-}
-
-func (_c *Collection_DeleteWith_Call) Run(run func(ctx context.Context, target interface{})) *Collection_DeleteWith_Call {
- _c.Call.Run(func(args mock.Arguments) {
- run(args[0].(context.Context), args[1].(interface{}))
- })
- return _c
-}
-
-func (_c *Collection_DeleteWith_Call) Return(_a0 *client.DeleteResult, _a1 error) *Collection_DeleteWith_Call {
- _c.Call.Return(_a0, _a1)
- return _c
-}
-
-func (_c *Collection_DeleteWith_Call) RunAndReturn(run func(context.Context, interface{}) (*client.DeleteResult, error)) *Collection_DeleteWith_Call {
- _c.Call.Return(run)
- return _c
-}
-
-// DeleteWithDocID provides a mock function with given fields: _a0, _a1
-func (_m *Collection) DeleteWithDocID(_a0 context.Context, _a1 client.DocID) (*client.DeleteResult, error) {
+// DeleteDocIndex provides a mock function with given fields: _a0, _a1
+func (_m *Collection) DeleteDocIndex(_a0 context.Context, _a1 *client.Document) error {
ret := _m.Called(_a0, _a1)
- var r0 *client.DeleteResult
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, client.DocID) (*client.DeleteResult, error)); ok {
- return rf(_a0, _a1)
- }
- if rf, ok := ret.Get(0).(func(context.Context, client.DocID) *client.DeleteResult); ok {
- r0 = rf(_a0, _a1)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).(*client.DeleteResult)
- }
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, client.DocID) error); ok {
- r1 = rf(_a0, _a1)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
-}
-
-// Collection_DeleteWithDocID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteWithDocID'
-type Collection_DeleteWithDocID_Call struct {
- *mock.Call
-}
-
-// DeleteWithDocID is a helper method to define mock.On call
-// - _a0 context.Context
-// - _a1 client.DocID
-func (_e *Collection_Expecter) DeleteWithDocID(_a0 interface{}, _a1 interface{}) *Collection_DeleteWithDocID_Call {
- return &Collection_DeleteWithDocID_Call{Call: _e.mock.On("DeleteWithDocID", _a0, _a1)}
-}
-
-func (_c *Collection_DeleteWithDocID_Call) Run(run func(_a0 context.Context, _a1 client.DocID)) *Collection_DeleteWithDocID_Call {
- _c.Call.Run(func(args mock.Arguments) {
- run(args[0].(context.Context), args[1].(client.DocID))
- })
- return _c
-}
-
-func (_c *Collection_DeleteWithDocID_Call) Return(_a0 *client.DeleteResult, _a1 error) *Collection_DeleteWithDocID_Call {
- _c.Call.Return(_a0, _a1)
- return _c
-}
-
-func (_c *Collection_DeleteWithDocID_Call) RunAndReturn(run func(context.Context, client.DocID) (*client.DeleteResult, error)) *Collection_DeleteWithDocID_Call {
- _c.Call.Return(run)
- return _c
-}
-
-// DeleteWithDocIDs provides a mock function with given fields: _a0, _a1
-func (_m *Collection) DeleteWithDocIDs(_a0 context.Context, _a1 []client.DocID) (*client.DeleteResult, error) {
- ret := _m.Called(_a0, _a1)
-
- var r0 *client.DeleteResult
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, []client.DocID) (*client.DeleteResult, error)); ok {
- return rf(_a0, _a1)
- }
- if rf, ok := ret.Get(0).(func(context.Context, []client.DocID) *client.DeleteResult); ok {
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, *client.Document) error); ok {
r0 = rf(_a0, _a1)
} else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).(*client.DeleteResult)
- }
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, []client.DocID) error); ok {
- r1 = rf(_a0, _a1)
- } else {
- r1 = ret.Error(1)
+ r0 = ret.Error(0)
}
- return r0, r1
+ return r0
}
-// Collection_DeleteWithDocIDs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteWithDocIDs'
-type Collection_DeleteWithDocIDs_Call struct {
+// Collection_DeleteDocIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteDocIndex'
+type Collection_DeleteDocIndex_Call struct {
*mock.Call
}
-// DeleteWithDocIDs is a helper method to define mock.On call
+// DeleteDocIndex is a helper method to define mock.On call
// - _a0 context.Context
-// - _a1 []client.DocID
-func (_e *Collection_Expecter) DeleteWithDocIDs(_a0 interface{}, _a1 interface{}) *Collection_DeleteWithDocIDs_Call {
- return &Collection_DeleteWithDocIDs_Call{Call: _e.mock.On("DeleteWithDocIDs", _a0, _a1)}
+// - _a1 *client.Document
+func (_e *Collection_Expecter) DeleteDocIndex(_a0 interface{}, _a1 interface{}) *Collection_DeleteDocIndex_Call {
+ return &Collection_DeleteDocIndex_Call{Call: _e.mock.On("DeleteDocIndex", _a0, _a1)}
}
-func (_c *Collection_DeleteWithDocIDs_Call) Run(run func(_a0 context.Context, _a1 []client.DocID)) *Collection_DeleteWithDocIDs_Call {
+func (_c *Collection_DeleteDocIndex_Call) Run(run func(_a0 context.Context, _a1 *client.Document)) *Collection_DeleteDocIndex_Call {
_c.Call.Run(func(args mock.Arguments) {
- run(args[0].(context.Context), args[1].([]client.DocID))
+ run(args[0].(context.Context), args[1].(*client.Document))
})
return _c
}
-func (_c *Collection_DeleteWithDocIDs_Call) Return(_a0 *client.DeleteResult, _a1 error) *Collection_DeleteWithDocIDs_Call {
- _c.Call.Return(_a0, _a1)
+func (_c *Collection_DeleteDocIndex_Call) Return(_a0 error) *Collection_DeleteDocIndex_Call {
+ _c.Call.Return(_a0)
return _c
}
-func (_c *Collection_DeleteWithDocIDs_Call) RunAndReturn(run func(context.Context, []client.DocID) (*client.DeleteResult, error)) *Collection_DeleteWithDocIDs_Call {
+func (_c *Collection_DeleteDocIndex_Call) RunAndReturn(run func(context.Context, *client.Document) error) *Collection_DeleteDocIndex_Call {
_c.Call.Return(run)
return _c
}
@@ -564,23 +483,23 @@ func (_c *Collection_DropIndex_Call) RunAndReturn(run func(context.Context, stri
return _c
}
-// Exists provides a mock function with given fields: _a0, _a1
-func (_m *Collection) Exists(_a0 context.Context, _a1 client.DocID) (bool, error) {
- ret := _m.Called(_a0, _a1)
+// Exists provides a mock function with given fields: ctx, docID
+func (_m *Collection) Exists(ctx context.Context, docID client.DocID) (bool, error) {
+ ret := _m.Called(ctx, docID)
var r0 bool
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, client.DocID) (bool, error)); ok {
- return rf(_a0, _a1)
+ return rf(ctx, docID)
}
if rf, ok := ret.Get(0).(func(context.Context, client.DocID) bool); ok {
- r0 = rf(_a0, _a1)
+ r0 = rf(ctx, docID)
} else {
r0 = ret.Get(0).(bool)
}
if rf, ok := ret.Get(1).(func(context.Context, client.DocID) error); ok {
- r1 = rf(_a0, _a1)
+ r1 = rf(ctx, docID)
} else {
r1 = ret.Error(1)
}
@@ -594,13 +513,13 @@ type Collection_Exists_Call struct {
}
// Exists is a helper method to define mock.On call
-// - _a0 context.Context
-// - _a1 client.DocID
-func (_e *Collection_Expecter) Exists(_a0 interface{}, _a1 interface{}) *Collection_Exists_Call {
- return &Collection_Exists_Call{Call: _e.mock.On("Exists", _a0, _a1)}
+// - ctx context.Context
+// - docID client.DocID
+func (_e *Collection_Expecter) Exists(ctx interface{}, docID interface{}) *Collection_Exists_Call {
+ return &Collection_Exists_Call{Call: _e.mock.On("Exists", ctx, docID)}
}
-func (_c *Collection_Exists_Call) Run(run func(_a0 context.Context, _a1 client.DocID)) *Collection_Exists_Call {
+func (_c *Collection_Exists_Call) Run(run func(ctx context.Context, docID client.DocID)) *Collection_Exists_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(context.Context), args[1].(client.DocID))
})
@@ -863,13 +782,13 @@ func (_c *Collection_Name_Call) RunAndReturn(run func() immutable.Option[string]
return _c
}
-// Save provides a mock function with given fields: _a0, _a1
-func (_m *Collection) Save(_a0 context.Context, _a1 *client.Document) error {
- ret := _m.Called(_a0, _a1)
+// Save provides a mock function with given fields: ctx, doc
+func (_m *Collection) Save(ctx context.Context, doc *client.Document) error {
+ ret := _m.Called(ctx, doc)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, *client.Document) error); ok {
- r0 = rf(_a0, _a1)
+ r0 = rf(ctx, doc)
} else {
r0 = ret.Error(0)
}
@@ -883,13 +802,13 @@ type Collection_Save_Call struct {
}
// Save is a helper method to define mock.On call
-// - _a0 context.Context
-// - _a1 *client.Document
-func (_e *Collection_Expecter) Save(_a0 interface{}, _a1 interface{}) *Collection_Save_Call {
- return &Collection_Save_Call{Call: _e.mock.On("Save", _a0, _a1)}
+// - ctx context.Context
+// - doc *client.Document
+func (_e *Collection_Expecter) Save(ctx interface{}, doc interface{}) *Collection_Save_Call {
+ return &Collection_Save_Call{Call: _e.mock.On("Save", ctx, doc)}
}
-func (_c *Collection_Save_Call) Run(run func(_a0 context.Context, _a1 *client.Document)) *Collection_Save_Call {
+func (_c *Collection_Save_Call) Run(run func(ctx context.Context, doc *client.Document)) *Collection_Save_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(context.Context), args[1].(*client.Document))
})
@@ -988,13 +907,13 @@ func (_c *Collection_SchemaRoot_Call) RunAndReturn(run func() string) *Collectio
return _c
}
-// Update provides a mock function with given fields: _a0, _a1
-func (_m *Collection) Update(_a0 context.Context, _a1 *client.Document) error {
- ret := _m.Called(_a0, _a1)
+// Update provides a mock function with given fields: ctx, docs
+func (_m *Collection) Update(ctx context.Context, docs *client.Document) error {
+ ret := _m.Called(ctx, docs)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, *client.Document) error); ok {
- r0 = rf(_a0, _a1)
+ r0 = rf(ctx, docs)
} else {
r0 = ret.Error(0)
}
@@ -1008,13 +927,13 @@ type Collection_Update_Call struct {
}
// Update is a helper method to define mock.On call
-// - _a0 context.Context
-// - _a1 *client.Document
-func (_e *Collection_Expecter) Update(_a0 interface{}, _a1 interface{}) *Collection_Update_Call {
- return &Collection_Update_Call{Call: _e.mock.On("Update", _a0, _a1)}
+// - ctx context.Context
+// - docs *client.Document
+func (_e *Collection_Expecter) Update(ctx interface{}, docs interface{}) *Collection_Update_Call {
+ return &Collection_Update_Call{Call: _e.mock.On("Update", ctx, docs)}
}
-func (_c *Collection_Update_Call) Run(run func(_a0 context.Context, _a1 *client.Document)) *Collection_Update_Call {
+func (_c *Collection_Update_Call) Run(run func(ctx context.Context, docs *client.Document)) *Collection_Update_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(context.Context), args[1].(*client.Document))
})
@@ -1031,170 +950,46 @@ func (_c *Collection_Update_Call) RunAndReturn(run func(context.Context, *client
return _c
}
-// UpdateWith provides a mock function with given fields: ctx, target, updater
-func (_m *Collection) UpdateWith(ctx context.Context, target interface{}, updater string) (*client.UpdateResult, error) {
- ret := _m.Called(ctx, target, updater)
-
- var r0 *client.UpdateResult
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, interface{}, string) (*client.UpdateResult, error)); ok {
- return rf(ctx, target, updater)
- }
- if rf, ok := ret.Get(0).(func(context.Context, interface{}, string) *client.UpdateResult); ok {
- r0 = rf(ctx, target, updater)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).(*client.UpdateResult)
- }
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, interface{}, string) error); ok {
- r1 = rf(ctx, target, updater)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
-}
-
-// Collection_UpdateWith_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateWith'
-type Collection_UpdateWith_Call struct {
- *mock.Call
-}
-
-// UpdateWith is a helper method to define mock.On call
-// - ctx context.Context
-// - target interface{}
-// - updater string
-func (_e *Collection_Expecter) UpdateWith(ctx interface{}, target interface{}, updater interface{}) *Collection_UpdateWith_Call {
- return &Collection_UpdateWith_Call{Call: _e.mock.On("UpdateWith", ctx, target, updater)}
-}
-
-func (_c *Collection_UpdateWith_Call) Run(run func(ctx context.Context, target interface{}, updater string)) *Collection_UpdateWith_Call {
- _c.Call.Run(func(args mock.Arguments) {
- run(args[0].(context.Context), args[1].(interface{}), args[2].(string))
- })
- return _c
-}
-
-func (_c *Collection_UpdateWith_Call) Return(_a0 *client.UpdateResult, _a1 error) *Collection_UpdateWith_Call {
- _c.Call.Return(_a0, _a1)
- return _c
-}
-
-func (_c *Collection_UpdateWith_Call) RunAndReturn(run func(context.Context, interface{}, string) (*client.UpdateResult, error)) *Collection_UpdateWith_Call {
- _c.Call.Return(run)
- return _c
-}
-
-// UpdateWithDocID provides a mock function with given fields: ctx, docID, updater
-func (_m *Collection) UpdateWithDocID(ctx context.Context, docID client.DocID, updater string) (*client.UpdateResult, error) {
- ret := _m.Called(ctx, docID, updater)
-
- var r0 *client.UpdateResult
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, client.DocID, string) (*client.UpdateResult, error)); ok {
- return rf(ctx, docID, updater)
- }
- if rf, ok := ret.Get(0).(func(context.Context, client.DocID, string) *client.UpdateResult); ok {
- r0 = rf(ctx, docID, updater)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).(*client.UpdateResult)
- }
- }
+// UpdateDocIndex provides a mock function with given fields: ctx, oldDoc, newDoc
+func (_m *Collection) UpdateDocIndex(ctx context.Context, oldDoc *client.Document, newDoc *client.Document) error {
+ ret := _m.Called(ctx, oldDoc, newDoc)
- if rf, ok := ret.Get(1).(func(context.Context, client.DocID, string) error); ok {
- r1 = rf(ctx, docID, updater)
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, *client.Document, *client.Document) error); ok {
+ r0 = rf(ctx, oldDoc, newDoc)
} else {
- r1 = ret.Error(1)
+ r0 = ret.Error(0)
}
- return r0, r1
+ return r0
}
-// Collection_UpdateWithDocID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateWithDocID'
-type Collection_UpdateWithDocID_Call struct {
+// Collection_UpdateDocIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateDocIndex'
+type Collection_UpdateDocIndex_Call struct {
*mock.Call
}
-// UpdateWithDocID is a helper method to define mock.On call
+// UpdateDocIndex is a helper method to define mock.On call
// - ctx context.Context
-// - docID client.DocID
-// - updater string
-func (_e *Collection_Expecter) UpdateWithDocID(ctx interface{}, docID interface{}, updater interface{}) *Collection_UpdateWithDocID_Call {
- return &Collection_UpdateWithDocID_Call{Call: _e.mock.On("UpdateWithDocID", ctx, docID, updater)}
+// - oldDoc *client.Document
+// - newDoc *client.Document
+func (_e *Collection_Expecter) UpdateDocIndex(ctx interface{}, oldDoc interface{}, newDoc interface{}) *Collection_UpdateDocIndex_Call {
+ return &Collection_UpdateDocIndex_Call{Call: _e.mock.On("UpdateDocIndex", ctx, oldDoc, newDoc)}
}
-func (_c *Collection_UpdateWithDocID_Call) Run(run func(ctx context.Context, docID client.DocID, updater string)) *Collection_UpdateWithDocID_Call {
+func (_c *Collection_UpdateDocIndex_Call) Run(run func(ctx context.Context, oldDoc *client.Document, newDoc *client.Document)) *Collection_UpdateDocIndex_Call {
_c.Call.Run(func(args mock.Arguments) {
- run(args[0].(context.Context), args[1].(client.DocID), args[2].(string))
+ run(args[0].(context.Context), args[1].(*client.Document), args[2].(*client.Document))
})
return _c
}
-func (_c *Collection_UpdateWithDocID_Call) Return(_a0 *client.UpdateResult, _a1 error) *Collection_UpdateWithDocID_Call {
- _c.Call.Return(_a0, _a1)
- return _c
-}
-
-func (_c *Collection_UpdateWithDocID_Call) RunAndReturn(run func(context.Context, client.DocID, string) (*client.UpdateResult, error)) *Collection_UpdateWithDocID_Call {
- _c.Call.Return(run)
- return _c
-}
-
-// UpdateWithDocIDs provides a mock function with given fields: _a0, _a1, _a2
-func (_m *Collection) UpdateWithDocIDs(_a0 context.Context, _a1 []client.DocID, _a2 string) (*client.UpdateResult, error) {
- ret := _m.Called(_a0, _a1, _a2)
-
- var r0 *client.UpdateResult
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, []client.DocID, string) (*client.UpdateResult, error)); ok {
- return rf(_a0, _a1, _a2)
- }
- if rf, ok := ret.Get(0).(func(context.Context, []client.DocID, string) *client.UpdateResult); ok {
- r0 = rf(_a0, _a1, _a2)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).(*client.UpdateResult)
- }
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, []client.DocID, string) error); ok {
- r1 = rf(_a0, _a1, _a2)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
-}
-
-// Collection_UpdateWithDocIDs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateWithDocIDs'
-type Collection_UpdateWithDocIDs_Call struct {
- *mock.Call
-}
-
-// UpdateWithDocIDs is a helper method to define mock.On call
-// - _a0 context.Context
-// - _a1 []client.DocID
-// - _a2 string
-func (_e *Collection_Expecter) UpdateWithDocIDs(_a0 interface{}, _a1 interface{}, _a2 interface{}) *Collection_UpdateWithDocIDs_Call {
- return &Collection_UpdateWithDocIDs_Call{Call: _e.mock.On("UpdateWithDocIDs", _a0, _a1, _a2)}
-}
-
-func (_c *Collection_UpdateWithDocIDs_Call) Run(run func(_a0 context.Context, _a1 []client.DocID, _a2 string)) *Collection_UpdateWithDocIDs_Call {
- _c.Call.Run(func(args mock.Arguments) {
- run(args[0].(context.Context), args[1].([]client.DocID), args[2].(string))
- })
- return _c
-}
-
-func (_c *Collection_UpdateWithDocIDs_Call) Return(_a0 *client.UpdateResult, _a1 error) *Collection_UpdateWithDocIDs_Call {
- _c.Call.Return(_a0, _a1)
+func (_c *Collection_UpdateDocIndex_Call) Return(_a0 error) *Collection_UpdateDocIndex_Call {
+ _c.Call.Return(_a0)
return _c
}
-func (_c *Collection_UpdateWithDocIDs_Call) RunAndReturn(run func(context.Context, []client.DocID, string) (*client.UpdateResult, error)) *Collection_UpdateWithDocIDs_Call {
+func (_c *Collection_UpdateDocIndex_Call) RunAndReturn(run func(context.Context, *client.Document, *client.Document) error) *Collection_UpdateDocIndex_Call {
_c.Call.Return(run)
return _c
}
@@ -1255,50 +1050,6 @@ func (_c *Collection_UpdateWithFilter_Call) RunAndReturn(run func(context.Contex
return _c
}
-// WithTxn provides a mock function with given fields: _a0
-func (_m *Collection) WithTxn(_a0 datastore.Txn) client.Collection {
- ret := _m.Called(_a0)
-
- var r0 client.Collection
- if rf, ok := ret.Get(0).(func(datastore.Txn) client.Collection); ok {
- r0 = rf(_a0)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).(client.Collection)
- }
- }
-
- return r0
-}
-
-// Collection_WithTxn_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WithTxn'
-type Collection_WithTxn_Call struct {
- *mock.Call
-}
-
-// WithTxn is a helper method to define mock.On call
-// - _a0 datastore.Txn
-func (_e *Collection_Expecter) WithTxn(_a0 interface{}) *Collection_WithTxn_Call {
- return &Collection_WithTxn_Call{Call: _e.mock.On("WithTxn", _a0)}
-}
-
-func (_c *Collection_WithTxn_Call) Run(run func(_a0 datastore.Txn)) *Collection_WithTxn_Call {
- _c.Call.Run(func(args mock.Arguments) {
- run(args[0].(datastore.Txn))
- })
- return _c
-}
-
-func (_c *Collection_WithTxn_Call) Return(_a0 client.Collection) *Collection_WithTxn_Call {
- _c.Call.Return(_a0)
- return _c
-}
-
-func (_c *Collection_WithTxn_Call) RunAndReturn(run func(datastore.Txn) client.Collection) *Collection_WithTxn_Call {
- _c.Call.Return(run)
- return _c
-}
-
// NewCollection creates a new instance of Collection. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewCollection(t interface {
diff --git a/client/mocks/db.go b/client/mocks/db.go
index aeb54ea4cd..20b5988fe7 100644
--- a/client/mocks/db.go
+++ b/client/mocks/db.go
@@ -32,6 +32,59 @@ func (_m *DB) EXPECT() *DB_Expecter {
return &DB_Expecter{mock: &_m.Mock}
}
+// AddPolicy provides a mock function with given fields: ctx, policy
+func (_m *DB) AddPolicy(ctx context.Context, policy string) (client.AddPolicyResult, error) {
+ ret := _m.Called(ctx, policy)
+
+ var r0 client.AddPolicyResult
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, string) (client.AddPolicyResult, error)); ok {
+ return rf(ctx, policy)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, string) client.AddPolicyResult); ok {
+ r0 = rf(ctx, policy)
+ } else {
+ r0 = ret.Get(0).(client.AddPolicyResult)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
+ r1 = rf(ctx, policy)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// DB_AddPolicy_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddPolicy'
+type DB_AddPolicy_Call struct {
+ *mock.Call
+}
+
+// AddPolicy is a helper method to define mock.On call
+// - ctx context.Context
+// - policy string
+func (_e *DB_Expecter) AddPolicy(ctx interface{}, policy interface{}) *DB_AddPolicy_Call {
+ return &DB_AddPolicy_Call{Call: _e.mock.On("AddPolicy", ctx, policy)}
+}
+
+func (_c *DB_AddPolicy_Call) Run(run func(ctx context.Context, policy string)) *DB_AddPolicy_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *DB_AddPolicy_Call) Return(_a0 client.AddPolicyResult, _a1 error) *DB_AddPolicy_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *DB_AddPolicy_Call) RunAndReturn(run func(context.Context, string) (client.AddPolicyResult, error)) *DB_AddPolicy_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// AddSchema provides a mock function with given fields: _a0, _a1
func (_m *DB) AddSchema(_a0 context.Context, _a1 string) ([]client.CollectionDescription, error) {
ret := _m.Called(_a0, _a1)
@@ -346,13 +399,13 @@ func (_c *DB_Events_Call) RunAndReturn(run func() events.Events) *DB_Events_Call
return _c
}
-// ExecRequest provides a mock function with given fields: _a0, _a1
-func (_m *DB) ExecRequest(_a0 context.Context, _a1 string) *client.RequestResult {
- ret := _m.Called(_a0, _a1)
+// ExecRequest provides a mock function with given fields: ctx, request
+func (_m *DB) ExecRequest(ctx context.Context, request string) *client.RequestResult {
+ ret := _m.Called(ctx, request)
var r0 *client.RequestResult
if rf, ok := ret.Get(0).(func(context.Context, string) *client.RequestResult); ok {
- r0 = rf(_a0, _a1)
+ r0 = rf(ctx, request)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*client.RequestResult)
@@ -368,13 +421,13 @@ type DB_ExecRequest_Call struct {
}
// ExecRequest is a helper method to define mock.On call
-// - _a0 context.Context
-// - _a1 string
-func (_e *DB_Expecter) ExecRequest(_a0 interface{}, _a1 interface{}) *DB_ExecRequest_Call {
- return &DB_ExecRequest_Call{Call: _e.mock.On("ExecRequest", _a0, _a1)}
+// - ctx context.Context
+// - request string
+func (_e *DB_Expecter) ExecRequest(ctx interface{}, request interface{}) *DB_ExecRequest_Call {
+ return &DB_ExecRequest_Call{Call: _e.mock.On("ExecRequest", ctx, request)}
}
-func (_c *DB_ExecRequest_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_ExecRequest_Call {
+func (_c *DB_ExecRequest_Call) Run(run func(ctx context.Context, request string)) *DB_ExecRequest_Call {
_c.Call.Run(func(args mock.Arguments) {
run(args[0].(context.Context), args[1].(string))
})
@@ -857,6 +910,49 @@ func (_c *DB_NewTxn_Call) RunAndReturn(run func(context.Context, bool) (datastor
return _c
}
+// PatchCollection provides a mock function with given fields: _a0, _a1
+func (_m *DB) PatchCollection(_a0 context.Context, _a1 string) error {
+ ret := _m.Called(_a0, _a1)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
+ r0 = rf(_a0, _a1)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// DB_PatchCollection_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PatchCollection'
+type DB_PatchCollection_Call struct {
+ *mock.Call
+}
+
+// PatchCollection is a helper method to define mock.On call
+// - _a0 context.Context
+// - _a1 string
+func (_e *DB_Expecter) PatchCollection(_a0 interface{}, _a1 interface{}) *DB_PatchCollection_Call {
+ return &DB_PatchCollection_Call{Call: _e.mock.On("PatchCollection", _a0, _a1)}
+}
+
+func (_c *DB_PatchCollection_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_PatchCollection_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *DB_PatchCollection_Call) Return(_a0 error) *DB_PatchCollection_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *DB_PatchCollection_Call) RunAndReturn(run func(context.Context, string) error) *DB_PatchCollection_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// PatchSchema provides a mock function with given fields: _a0, _a1, _a2, _a3
func (_m *DB) PatchSchema(_a0 context.Context, _a1 string, _a2 immutable.Option[model.Lens], _a3 bool) error {
ret := _m.Called(_a0, _a1, _a2, _a3)
@@ -1116,50 +1212,6 @@ func (_c *DB_SetMigration_Call) RunAndReturn(run func(context.Context, client.Le
return _c
}
-// WithTxn provides a mock function with given fields: _a0
-func (_m *DB) WithTxn(_a0 datastore.Txn) client.Store {
- ret := _m.Called(_a0)
-
- var r0 client.Store
- if rf, ok := ret.Get(0).(func(datastore.Txn) client.Store); ok {
- r0 = rf(_a0)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).(client.Store)
- }
- }
-
- return r0
-}
-
-// DB_WithTxn_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WithTxn'
-type DB_WithTxn_Call struct {
- *mock.Call
-}
-
-// WithTxn is a helper method to define mock.On call
-// - _a0 datastore.Txn
-func (_e *DB_Expecter) WithTxn(_a0 interface{}) *DB_WithTxn_Call {
- return &DB_WithTxn_Call{Call: _e.mock.On("WithTxn", _a0)}
-}
-
-func (_c *DB_WithTxn_Call) Run(run func(_a0 datastore.Txn)) *DB_WithTxn_Call {
- _c.Call.Run(func(args mock.Arguments) {
- run(args[0].(datastore.Txn))
- })
- return _c
-}
-
-func (_c *DB_WithTxn_Call) Return(_a0 client.Store) *DB_WithTxn_Call {
- _c.Call.Return(_a0)
- return _c
-}
-
-func (_c *DB_WithTxn_Call) RunAndReturn(run func(datastore.Txn) client.Store) *DB_WithTxn_Call {
- _c.Call.Return(run)
- return _c
-}
-
// NewDB creates a new instance of DB. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewDB(t interface {
diff --git a/client/normal_array.go b/client/normal_array.go
new file mode 100644
index 0000000000..00133a0f74
--- /dev/null
+++ b/client/normal_array.go
@@ -0,0 +1,149 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package client
+
+import (
+ "time"
+
+ "golang.org/x/exp/constraints"
+)
+
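+// baseArrayNormalValue embeds NormalVoid and provides the common Unwrap and
+// IsArray behaviour shared by the concrete array NormalValue types below.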
+type baseArrayNormalValue[T any] struct {
+ NormalVoid
+ val T
+}
+
+func (v baseArrayNormalValue[T]) Unwrap() any {
+ return v.val
+}
+
+func (v baseArrayNormalValue[T]) IsArray() bool {
+ return true
+}
+
+func newBaseArrayNormalValue[T any](val T) baseArrayNormalValue[T] {
+ return baseArrayNormalValue[T]{val: val}
+}
+
+type normalBoolArray struct {
+ baseArrayNormalValue[[]bool]
+}
+
+func (v normalBoolArray) BoolArray() ([]bool, bool) {
+ return v.val, true
+}
+
+type normalIntArray struct {
+ baseArrayNormalValue[[]int64]
+}
+
+func (v normalIntArray) IntArray() ([]int64, bool) {
+ return v.val, true
+}
+
+type normalFloatArray struct {
+ baseArrayNormalValue[[]float64]
+}
+
+func (v normalFloatArray) FloatArray() ([]float64, bool) {
+ return v.val, true
+}
+
+type normalStringArray struct {
+ baseArrayNormalValue[[]string]
+}
+
+func (v normalStringArray) StringArray() ([]string, bool) {
+ return v.val, true
+}
+
+type normalBytesArray struct {
+ baseArrayNormalValue[[][]byte]
+}
+
+func (v normalBytesArray) BytesArray() ([][]byte, bool) {
+ return v.val, true
+}
+
+type normalTimeArray struct {
+ baseArrayNormalValue[[]time.Time]
+}
+
+func (v normalTimeArray) TimeArray() ([]time.Time, bool) {
+ return v.val, true
+}
+
+type normalDocumentArray struct {
+ baseArrayNormalValue[[]*Document]
+}
+
+func (v normalDocumentArray) DocumentArray() ([]*Document, bool) {
+ return v.val, true
+}
+
+// NewNormalBoolArray creates a new NormalValue that represents a `[]bool` value.
+func NewNormalBoolArray(val []bool) NormalValue {
+ return normalBoolArray{newBaseArrayNormalValue(val)}
+}
+
+// NewNormalIntArray creates a new NormalValue that represents a `[]int64` value.
+func NewNormalIntArray[T constraints.Integer | constraints.Float](val []T) NormalValue {
+ return normalIntArray{newBaseArrayNormalValue(normalizeNumArr[int64](val))}
+}
+
+// NewNormalFloatArray creates a new NormalValue that represents a `[]float64` value.
+func NewNormalFloatArray[T constraints.Integer | constraints.Float](val []T) NormalValue {
+ return normalFloatArray{newBaseArrayNormalValue(normalizeNumArr[float64](val))}
+}
+
+// NewNormalStringArray creates a new NormalValue that represents a `[]string` value.
+func NewNormalStringArray[T string | []byte](val []T) NormalValue {
+ return normalStringArray{newBaseArrayNormalValue(normalizeCharsArr[string](val))}
+}
+
+// NewNormalBytesArray creates a new NormalValue that represents a `[][]byte` value.
+func NewNormalBytesArray[T string | []byte](val []T) NormalValue {
+ return normalBytesArray{newBaseArrayNormalValue(normalizeCharsArr[[]byte](val))}
+}
+
+// NewNormalTimeArray creates a new NormalValue that represents a `[]time.Time` value.
+func NewNormalTimeArray(val []time.Time) NormalValue {
+ return normalTimeArray{newBaseArrayNormalValue(val)}
+}
+
+// NewNormalDocumentArray creates a new NormalValue that represents a `[]*Document` value.
+func NewNormalDocumentArray(val []*Document) NormalValue {
+ return normalDocumentArray{newBaseArrayNormalValue(val)}
+}
+
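+// normalizeNumArr converts a numeric slice to the target element type R,
+// returning the input unchanged when it already has the requested type.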
+func normalizeNumArr[R int64 | float64, T constraints.Integer | constraints.Float](val []T) []R {
+ var v any = val
+ if arr, ok := v.([]R); ok {
+ return arr
+ }
+ arr := make([]R, len(val))
+ for i, v := range val {
+ arr[i] = R(v)
+ }
+ return arr
+}
+
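+// normalizeCharsArr converts between []string and [][]byte element types,
+// returning the input unchanged when it already has the requested type.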
+func normalizeCharsArr[R string | []byte, T string | []byte](val []T) []R {
+ var v any = val
+ if arr, ok := v.([]R); ok {
+ return arr
+ }
+ arr := make([]R, len(val))
+ for i, v := range val {
+ arr[i] = R(v)
+ }
+ return arr
+}
diff --git a/client/normal_array_of_nillables.go b/client/normal_array_of_nillables.go
new file mode 100644
index 0000000000..53461f6afa
--- /dev/null
+++ b/client/normal_array_of_nillables.go
@@ -0,0 +1,142 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package client
+
+import (
+ "time"
+
+ "github.com/sourcenetwork/immutable"
+ "golang.org/x/exp/constraints"
+)
+
+type normalNillableBoolArray struct {
+ baseArrayNormalValue[[]immutable.Option[bool]]
+}
+
+func (v normalNillableBoolArray) NillableBoolArray() ([]immutable.Option[bool], bool) {
+ return v.val, true
+}
+
+type normalNillableIntArray struct {
+ baseArrayNormalValue[[]immutable.Option[int64]]
+}
+
+func (v normalNillableIntArray) NillableIntArray() ([]immutable.Option[int64], bool) {
+ return v.val, true
+}
+
+type normalNillableFloatArray struct {
+ baseArrayNormalValue[[]immutable.Option[float64]]
+}
+
+func (v normalNillableFloatArray) NillableFloatArray() ([]immutable.Option[float64], bool) {
+ return v.val, true
+}
+
+type normalNillableStringArray struct {
+ baseArrayNormalValue[[]immutable.Option[string]]
+}
+
+func (v normalNillableStringArray) NillableStringArray() ([]immutable.Option[string], bool) {
+ return v.val, true
+}
+
+type normalNillableBytesArray struct {
+ baseArrayNormalValue[[]immutable.Option[[]byte]]
+}
+
+func (v normalNillableBytesArray) NillableBytesArray() ([]immutable.Option[[]byte], bool) {
+ return v.val, true
+}
+
+type normalNillableTimeArray struct {
+ baseArrayNormalValue[[]immutable.Option[time.Time]]
+}
+
+func (v normalNillableTimeArray) NillableTimeArray() ([]immutable.Option[time.Time], bool) {
+ return v.val, true
+}
+
+type normalNillableDocumentArray struct {
+ baseArrayNormalValue[[]immutable.Option[*Document]]
+}
+
+func (v normalNillableDocumentArray) NillableDocumentArray() ([]immutable.Option[*Document], bool) {
+ return v.val, true
+}
+
+// NewNormalNillableBoolArray creates a new NormalValue that represents a `[]immutable.Option[bool]` value.
+func NewNormalNillableBoolArray(val []immutable.Option[bool]) NormalValue {
+ return normalNillableBoolArray{newBaseArrayNormalValue(val)}
+}
+
+// NewNormalNillableIntArray creates a new NormalValue that represents a `[]immutable.Option[int64]` value.
+func NewNormalNillableIntArray[T constraints.Integer | constraints.Float](val []immutable.Option[T]) NormalValue {
+ return normalNillableIntArray{newBaseArrayNormalValue(normalizeNillableNumArr[int64](val))}
+}
+
+// NewNormalNillableFloatArray creates a new NormalValue that represents a `[]immutable.Option[float64]` value.
+func NewNormalNillableFloatArray[T constraints.Integer | constraints.Float](
+ val []immutable.Option[T],
+) NormalValue {
+ return normalNillableFloatArray{newBaseArrayNormalValue(normalizeNillableNumArr[float64](val))}
+}
+
+// NewNormalNillableStringArray creates a new NormalValue that represents a `[]immutable.Option[string]` value.
+func NewNormalNillableStringArray[T string | []byte](val []immutable.Option[T]) NormalValue {
+ return normalNillableStringArray{newBaseArrayNormalValue(normalizeNillableCharsArr[string](val))}
+}
+
+// NewNormalNillableBytesArray creates a new NormalValue that represents a `[]immutable.Option[[]byte]` value.
+func NewNormalNillableBytesArray[T string | []byte](val []immutable.Option[T]) NormalValue {
+ return normalNillableBytesArray{newBaseArrayNormalValue(normalizeNillableCharsArr[[]byte](val))}
+}
+
+// NewNormalNillableTimeArray creates a new NormalValue that represents a `[]immutable.Option[time.Time]` value.
+func NewNormalNillableTimeArray(val []immutable.Option[time.Time]) NormalValue {
+ return normalNillableTimeArray{newBaseArrayNormalValue(val)}
+}
+
+// NewNormalNillableDocumentArray creates a new NormalValue that represents a `[]immutable.Option[*Document]` value.
+func NewNormalNillableDocumentArray(val []immutable.Option[*Document]) NormalValue {
+ return normalNillableDocumentArray{newBaseArrayNormalValue(val)}
+}
+
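+// normalizeNillableNumArr converts a slice of nillable numbers to the target
+// element type R, preserving None entries.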
+func normalizeNillableNumArr[R int64 | float64, T constraints.Integer | constraints.Float](
+ val []immutable.Option[T],
+) []immutable.Option[R] {
+ var v any = val
+ if arr, ok := v.([]immutable.Option[R]); ok {
+ return arr
+ }
+ arr := make([]immutable.Option[R], len(val))
+ for i, v := range val {
+ arr[i] = normalizeNillableNum[R](v)
+ }
+ return arr
+}
+
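+// normalizeNillableCharsArr converts a slice of nillable strings or byte
+// slices to the target element type R, preserving None entries.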
+func normalizeNillableCharsArr[R string | []byte, T string | []byte](val []immutable.Option[T]) []immutable.Option[R] {
+ var v any = val
+ if arr, ok := v.([]immutable.Option[R]); ok {
+ return arr
+ }
+ arr := make([]immutable.Option[R], len(val))
+ for i, v := range val {
+ if v.HasValue() {
+ arr[i] = immutable.Some(R(v.Value()))
+ } else {
+ arr[i] = immutable.None[R]()
+ }
+ }
+ return arr
+}
diff --git a/client/normal_new.go b/client/normal_new.go
new file mode 100644
index 0000000000..55ac46ce73
--- /dev/null
+++ b/client/normal_new.go
@@ -0,0 +1,465 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package client
+
+import (
+ "time"
+
+ "github.com/sourcenetwork/immutable"
+)
+
+// NewNormalValue creates a new NormalValue from the given value.
+// It will normalize all known types that can be converted to normal ones.
+// For example, if the given type is `[]int32`, it will be converted to `[]int64`.
+// If the given value is of type `[]any`, it will go through every element, determine the
+// most common type, and normalize all elements to it.
+// For example, the following conversions will be made:
+// - `[]any{int32(1), int64(2)}` -> `[]int64{1, 2}`.
+// - `[]any{int32(1), int64(2), float32(1.5)}` -> `[]float64{1.0, 2.0, 1.5}`.
+// - `[]any{int32(1), nil}` -> `[]immutable.Option[int64]{immutable.Some(1), immutable.None[int64]()}`.
+//
+// This function will not check if the given value is `nil`. To normalize a `nil` value use the
+// `NewNormalNil` function.
+func NewNormalValue(val any) (NormalValue, error) {
+ switch v := val.(type) {
+ case bool:
+ return NewNormalBool(v), nil
+ case int8:
+ return newNormalInt(int64(v)), nil
+ case int16:
+ return newNormalInt(int64(v)), nil
+ case int32:
+ return newNormalInt(int64(v)), nil
+ case int64:
+ return newNormalInt(v), nil
+ case int:
+ return newNormalInt(int64(v)), nil
+ case uint8:
+ return newNormalInt(int64(v)), nil
+ case uint16:
+ return newNormalInt(int64(v)), nil
+ case uint32:
+ return newNormalInt(int64(v)), nil
+ case uint64:
+ return newNormalInt(int64(v)), nil
+ case uint:
+ return newNormalInt(int64(v)), nil
+ case float32:
+ return newNormalFloat(float64(v)), nil
+ case float64:
+ return newNormalFloat(v), nil
+ case string:
+ return NewNormalString(v), nil
+ case []byte:
+ return NewNormalBytes(v), nil
+ case time.Time:
+ return NewNormalTime(v), nil
+ case *Document:
+ return NewNormalDocument(v), nil
+
+ case immutable.Option[bool]:
+ return NewNormalNillableBool(v), nil
+ case immutable.Option[int8]:
+ return NewNormalNillableInt(v), nil
+ case immutable.Option[int16]:
+ return NewNormalNillableInt(v), nil
+ case immutable.Option[int32]:
+ return NewNormalNillableInt(v), nil
+ case immutable.Option[int64]:
+ return NewNormalNillableInt(v), nil
+ case immutable.Option[int]:
+ return NewNormalNillableInt(v), nil
+ case immutable.Option[uint8]:
+ return NewNormalNillableInt(v), nil
+ case immutable.Option[uint16]:
+ return NewNormalNillableInt(v), nil
+ case immutable.Option[uint32]:
+ return NewNormalNillableInt(v), nil
+ case immutable.Option[uint64]:
+ return NewNormalNillableInt(v), nil
+ case immutable.Option[uint]:
+ return NewNormalNillableInt(v), nil
+ case immutable.Option[float32]:
+ return NewNormalNillableFloat(v), nil
+ case immutable.Option[float64]:
+ return NewNormalNillableFloat(v), nil
+ case immutable.Option[string]:
+ return NewNormalNillableString(v), nil
+ case immutable.Option[[]byte]:
+ return NewNormalNillableBytes(v), nil
+ case immutable.Option[time.Time]:
+ return NewNormalNillableTime(v), nil
+ case immutable.Option[*Document]:
+ return NewNormalNillableDocument(v), nil
+
+ case []bool:
+ return NewNormalBoolArray(v), nil
+ case []int8:
+ return NewNormalIntArray(v), nil
+ case []int16:
+ return NewNormalIntArray(v), nil
+ case []int32:
+ return NewNormalIntArray(v), nil
+ case []int64:
+ return NewNormalIntArray(v), nil
+ case []int:
+ return NewNormalIntArray(v), nil
+ case []uint16:
+ return NewNormalIntArray(v), nil
+ case []uint32:
+ return NewNormalIntArray(v), nil
+ case []uint64:
+ return NewNormalIntArray(v), nil
+ case []uint:
+ return NewNormalIntArray(v), nil
+ case []float32:
+ return NewNormalFloatArray(v), nil
+ case []float64:
+ return NewNormalFloatArray(v), nil
+ case []string:
+ return NewNormalStringArray(v), nil
+ case [][]byte:
+ return NewNormalBytesArray(v), nil
+ case []time.Time:
+ return NewNormalTimeArray(v), nil
+ case []*Document:
+ return NewNormalDocumentArray(v), nil
+
+ case []immutable.Option[bool]:
+ return NewNormalNillableBoolArray(v), nil
+ case []immutable.Option[int8]:
+ return NewNormalNillableIntArray(v), nil
+ case []immutable.Option[int16]:
+ return NewNormalNillableIntArray(v), nil
+ case []immutable.Option[int32]:
+ return NewNormalNillableIntArray(v), nil
+ case []immutable.Option[int64]:
+ return NewNormalNillableIntArray(v), nil
+ case []immutable.Option[int]:
+ return NewNormalNillableIntArray(v), nil
+ case []immutable.Option[uint8]:
+ return NewNormalNillableIntArray(v), nil
+ case []immutable.Option[uint16]:
+ return NewNormalNillableIntArray(v), nil
+ case []immutable.Option[uint32]:
+ return NewNormalNillableIntArray(v), nil
+ case []immutable.Option[uint64]:
+ return NewNormalNillableIntArray(v), nil
+ case []immutable.Option[uint]:
+ return NewNormalNillableIntArray(v), nil
+ case []immutable.Option[float32]:
+ return NewNormalNillableFloatArray(v), nil
+ case []immutable.Option[float64]:
+ return NewNormalNillableFloatArray(v), nil
+ case []immutable.Option[string]:
+ return NewNormalNillableStringArray(v), nil
+ case []immutable.Option[[]byte]:
+ return NewNormalNillableBytesArray(v), nil
+ case []immutable.Option[time.Time]:
+ return NewNormalNillableTimeArray(v), nil
+ case []immutable.Option[*Document]:
+ return NewNormalNillableDocumentArray(v), nil
+
+ case immutable.Option[[]bool]:
+ return NewNormalBoolNillableArray(v), nil
+ case immutable.Option[[]int8]:
+ return NewNormalIntNillableArray(v), nil
+ case immutable.Option[[]int16]:
+ return NewNormalIntNillableArray(v), nil
+ case immutable.Option[[]int32]:
+ return NewNormalIntNillableArray(v), nil
+ case immutable.Option[[]int64]:
+ return NewNormalIntNillableArray(v), nil
+ case immutable.Option[[]int]:
+ return NewNormalIntNillableArray(v), nil
+ case immutable.Option[[]uint16]:
+ return NewNormalIntNillableArray(v), nil
+ case immutable.Option[[]uint32]:
+ return NewNormalIntNillableArray(v), nil
+ case immutable.Option[[]uint64]:
+ return NewNormalIntNillableArray(v), nil
+ case immutable.Option[[]uint]:
+ return NewNormalIntNillableArray(v), nil
+ case immutable.Option[[]float32]:
+ return NewNormalFloatNillableArray(v), nil
+ case immutable.Option[[]float64]:
+ return NewNormalFloatNillableArray(v), nil
+ case immutable.Option[[]string]:
+ return NewNormalStringNillableArray(v), nil
+ case immutable.Option[[][]byte]:
+ return NewNormalBytesNillableArray(v), nil
+ case immutable.Option[[]time.Time]:
+ return NewNormalTimeNillableArray(v), nil
+ case immutable.Option[[]*Document]:
+ return NewNormalDocumentNillableArray(v), nil
+
+ case immutable.Option[[]immutable.Option[bool]]:
+ return NewNormalNillableBoolNillableArray(v), nil
+ case immutable.Option[[]immutable.Option[int8]]:
+ return NewNormalNillableIntNillableArray(v), nil
+ case immutable.Option[[]immutable.Option[int16]]:
+ return NewNormalNillableIntNillableArray(v), nil
+ case immutable.Option[[]immutable.Option[int32]]:
+ return NewNormalNillableIntNillableArray(v), nil
+ case immutable.Option[[]immutable.Option[int64]]:
+ return NewNormalNillableIntNillableArray(v), nil
+ case immutable.Option[[]immutable.Option[int]]:
+ return NewNormalNillableIntNillableArray(v), nil
+ case immutable.Option[[]immutable.Option[uint8]]:
+ return NewNormalNillableIntNillableArray(v), nil
+ case immutable.Option[[]immutable.Option[uint16]]:
+ return NewNormalNillableIntNillableArray(v), nil
+ case immutable.Option[[]immutable.Option[uint32]]:
+ return NewNormalNillableIntNillableArray(v), nil
+ case immutable.Option[[]immutable.Option[uint64]]:
+ return NewNormalNillableIntNillableArray(v), nil
+ case immutable.Option[[]immutable.Option[uint]]:
+ return NewNormalNillableIntNillableArray(v), nil
+ case immutable.Option[[]immutable.Option[float32]]:
+ return NewNormalNillableFloatNillableArray(v), nil
+ case immutable.Option[[]immutable.Option[float64]]:
+ return NewNormalNillableFloatNillableArray(v), nil
+ case immutable.Option[[]immutable.Option[string]]:
+ return NewNormalNillableStringNillableArray(v), nil
+ case immutable.Option[[]immutable.Option[[]byte]]:
+ return NewNormalNillableBytesNillableArray(v), nil
+ case immutable.Option[[]immutable.Option[time.Time]]:
+ return NewNormalNillableTimeNillableArray(v), nil
+ case immutable.Option[[]immutable.Option[*Document]]:
+ return NewNormalNillableDocumentNillableArray(v), nil
+
+ case []any:
+ if len(v) == 0 {
+ return nil, NewCanNotNormalizeValue(val)
+ }
+ first, err := NewNormalValue(v[0])
+ if err != nil {
+ return nil, err
+ }
+ if _, ok := first.Bool(); ok {
+ return convertAnyArrToTypedArr[bool](v, NewNormalBoolArray, NewNormalNillableBoolArray)
+ }
+ if _, ok := first.Int(); ok {
+ return convertAnyArrToIntOrFloatArr(v)
+ }
+ if _, ok := first.Float(); ok {
+ return convertAnyArrToFloatArr(v)
+ }
+ if _, ok := first.String(); ok {
+ return convertAnyArrToTypedArr[string](v, NewNormalStringArray, NewNormalNillableStringArray)
+ }
+ if _, ok := first.Bytes(); ok {
+ return convertAnyArrToTypedArr[[]byte](v, NewNormalBytesArray, NewNormalNillableBytesArray)
+ }
+ if _, ok := first.Time(); ok {
+ return convertAnyArrToTypedArr[time.Time](v, NewNormalTimeArray, NewNormalNillableTimeArray)
+ }
+ if _, ok := first.Document(); ok {
+ return convertAnyArrToTypedArr[*Document](v, NewNormalDocumentArray, NewNormalNillableDocumentArray)
+ }
+ }
+ return nil, NewCanNotNormalizeValue(val)
+}
+
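+// convertAnyArrToIntOrFloatArr converts a []any of numeric values to []int64,
+// falling back to a float array if a float element is found, or to a nillable
+// int array if a nil element is found.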
+func convertAnyArrToIntOrFloatArr(arr []any) (NormalValue, error) {
+ result := make([]int64, len(arr))
+ for i := range arr {
+ if arr[i] == nil {
+ return convertAnyArrToNillableIntOrFloatArr(arr)
+ }
+ switch v := arr[i].(type) {
+ case int64:
+ result[i] = v
+ case float64, float32:
+ return convertAnyArrToFloatArr(arr)
+ case int8:
+ result[i] = int64(v)
+ case int16:
+ result[i] = int64(v)
+ case int32:
+ result[i] = int64(v)
+ case int:
+ result[i] = int64(v)
+ case uint8:
+ result[i] = int64(v)
+ case uint16:
+ result[i] = int64(v)
+ case uint32:
+ result[i] = int64(v)
+ case uint64:
+ result[i] = int64(v)
+ case uint:
+ result[i] = int64(v)
+ default:
+ return nil, NewCanNotNormalizeValue(arr)
+ }
+ }
+ return NewNormalIntArray(result), nil
+}
+
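+// convertAnyArrToNillableIntOrFloatArr is like convertAnyArrToIntOrFloatArr,
+// but stores elements as immutable.Option[int64] so that nil entries become None.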
+func convertAnyArrToNillableIntOrFloatArr(arr []any) (NormalValue, error) {
+ result := make([]immutable.Option[int64], len(arr))
+ for i := range arr {
+ if arr[i] == nil {
+ result[i] = immutable.None[int64]()
+ continue
+ }
+ var intVal int64
+ switch v := arr[i].(type) {
+ case int64:
+ intVal = v
+ case float64, float32:
+ return convertAnyArrToFloatArr(arr)
+ case int8:
+ intVal = int64(v)
+ case int16:
+ intVal = int64(v)
+ case int32:
+ intVal = int64(v)
+ case int:
+ intVal = int64(v)
+ case uint8:
+ intVal = int64(v)
+ case uint16:
+ intVal = int64(v)
+ case uint32:
+ intVal = int64(v)
+ case uint64:
+ intVal = int64(v)
+ case uint:
+ intVal = int64(v)
+ default:
+ return nil, NewCanNotNormalizeValue(arr)
+ }
+ result[i] = immutable.Some(intVal)
+ }
+ return NewNormalNillableIntArray(result), nil
+}
+
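+// convertAnyArrToFloatArr converts a []any of numeric values to []float64,
+// switching to a nillable float array if a nil element is found.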
+func convertAnyArrToFloatArr(arr []any) (NormalValue, error) {
+ result := make([]float64, len(arr))
+ for i := range arr {
+ if arr[i] == nil {
+ return convertAnyArrToNillableFloatArr(arr)
+ }
+
+ var floatVal float64
+ switch v := arr[i].(type) {
+ case float64:
+ floatVal = v
+ case float32:
+ floatVal = float64(v)
+ case int8:
+ floatVal = float64(v)
+ case int16:
+ floatVal = float64(v)
+ case int32:
+ floatVal = float64(v)
+ case int64:
+ floatVal = float64(v)
+ case int:
+ floatVal = float64(v)
+ case uint8:
+ floatVal = float64(v)
+ case uint16:
+ floatVal = float64(v)
+ case uint32:
+ floatVal = float64(v)
+ case uint64:
+ floatVal = float64(v)
+ case uint:
+ floatVal = float64(v)
+ default:
+ return nil, NewCanNotNormalizeValue(arr)
+ }
+ result[i] = floatVal
+ }
+ return NewNormalFloatArray(result), nil
+}
+
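+// convertAnyArrToNillableFloatArr converts a []any of numeric values to
+// []immutable.Option[float64], mapping nil entries to None.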
+func convertAnyArrToNillableFloatArr(arr []any) (NormalValue, error) {
+ result := make([]immutable.Option[float64], len(arr))
+ for i := range arr {
+ if arr[i] == nil {
+ result[i] = immutable.None[float64]()
+ continue
+ }
+ var floatVal float64
+ switch v := arr[i].(type) {
+ case float64:
+ floatVal = v
+ case float32:
+ floatVal = float64(v)
+ case int8:
+ floatVal = float64(v)
+ case int16:
+ floatVal = float64(v)
+ case int32:
+ floatVal = float64(v)
+ case int64:
+ floatVal = float64(v)
+ case int:
+ floatVal = float64(v)
+ case uint8:
+ floatVal = float64(v)
+ case uint16:
+ floatVal = float64(v)
+ case uint32:
+ floatVal = float64(v)
+ case uint64:
+ floatVal = float64(v)
+ case uint:
+ floatVal = float64(v)
+ default:
+ return nil, NewCanNotNormalizeValue(arr)
+ }
+ result[i] = immutable.Some(floatVal)
+ }
+ return NewNormalNillableFloatArray(result), nil
+}
+
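+// convertAnyArrToTypedArr converts a []any whose elements are all of type T to
+// a typed NormalValue array, switching to the nillable variant if a nil
+// element is found.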
+func convertAnyArrToTypedArr[T any](
+ arr []any,
+ newNormalArr func([]T) NormalValue,
+ newNormalNillableArr func([]immutable.Option[T]) NormalValue,
+) (NormalValue, error) {
+ result := make([]T, len(arr))
+ for i := range arr {
+ if arr[i] == nil {
+ return convertAnyArrToNillableTypedArr[T](arr, newNormalNillableArr)
+ }
+ if v, ok := arr[i].(T); ok {
+ result[i] = v
+ } else {
+ return nil, NewCanNotNormalizeValue(arr)
+ }
+ }
+ return newNormalArr(result), nil
+}
+
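+// convertAnyArrToNillableTypedArr converts a []any to []immutable.Option[T],
+// mapping nil entries to None and failing on elements of any other type.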
+func convertAnyArrToNillableTypedArr[T any](
+ arr []any,
+ newNormalNillableArr func([]immutable.Option[T]) NormalValue,
+) (NormalValue, error) {
+ result := make([]immutable.Option[T], len(arr))
+ for i := range arr {
+ if arr[i] == nil {
+ result[i] = immutable.None[T]()
+ continue
+ }
+ if v, ok := arr[i].(T); ok {
+ result[i] = immutable.Some(v)
+ } else {
+ return nil, NewCanNotNormalizeValue(arr)
+ }
+ }
+ return newNormalNillableArr(result), nil
+}
diff --git a/client/normal_nil.go b/client/normal_nil.go
new file mode 100644
index 0000000000..7cd2df3f16
--- /dev/null
+++ b/client/normal_nil.go
@@ -0,0 +1,56 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package client
+
+import (
+ "time"
+
+ "github.com/sourcenetwork/immutable"
+)
+
+// NewNormalNil creates a new NormalValue that represents a nil value of a given field kind.
+func NewNormalNil(kind FieldKind) (NormalValue, error) {
+ if kind.IsObject() {
+ return NewNormalNillableDocument(immutable.None[*Document]()), nil
+ }
+ switch kind {
+ case FieldKind_NILLABLE_BOOL:
+ return NewNormalNillableBool(immutable.None[bool]()), nil
+ case FieldKind_NILLABLE_INT:
+ return NewNormalNillableInt(immutable.None[int64]()), nil
+ case FieldKind_NILLABLE_FLOAT:
+ return NewNormalNillableFloat(immutable.None[float64]()), nil
+ case FieldKind_NILLABLE_DATETIME:
+ return NewNormalNillableTime(immutable.None[time.Time]()), nil
+ case FieldKind_NILLABLE_STRING, FieldKind_NILLABLE_JSON:
+ return NewNormalNillableString(immutable.None[string]()), nil
+ case FieldKind_NILLABLE_BLOB:
+ return NewNormalNillableBytes(immutable.None[[]byte]()), nil
+ case FieldKind_BOOL_ARRAY:
+ return NewNormalBoolNillableArray(immutable.None[[]bool]()), nil
+ case FieldKind_INT_ARRAY:
+ return NewNormalIntNillableArray(immutable.None[[]int64]()), nil
+ case FieldKind_FLOAT_ARRAY:
+ return NewNormalFloatNillableArray(immutable.None[[]float64]()), nil
+ case FieldKind_STRING_ARRAY:
+ return NewNormalStringNillableArray(immutable.None[[]string]()), nil
+ case FieldKind_NILLABLE_BOOL_ARRAY:
+ return NewNormalNillableBoolNillableArray(immutable.None[[]immutable.Option[bool]]()), nil
+ case FieldKind_NILLABLE_INT_ARRAY:
+ return NewNormalNillableIntNillableArray(immutable.None[[]immutable.Option[int64]]()), nil
+ case FieldKind_NILLABLE_FLOAT_ARRAY:
+ return NewNormalNillableFloatNillableArray(immutable.None[[]immutable.Option[float64]]()), nil
+ case FieldKind_NILLABLE_STRING_ARRAY:
+ return NewNormalNillableStringNillableArray(immutable.None[[]immutable.Option[string]]()), nil
+ default:
+ return nil, NewCanNotMakeNormalNilFromFieldKind(kind)
+ }
+}
diff --git a/client/normal_nillable_array.go b/client/normal_nillable_array.go
new file mode 100644
index 0000000000..fa6bdc4bbb
--- /dev/null
+++ b/client/normal_nillable_array.go
@@ -0,0 +1,152 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package client
+
+import (
+ "time"
+
+ "github.com/sourcenetwork/immutable"
+ "golang.org/x/exp/constraints"
+)
+
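+// baseNillableArrayNormalValue wraps an optional array value, unwrapping it to
+// nil when empty and reporting it as both nillable and an array.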
+type baseNillableArrayNormalValue[T any] struct {
+ baseArrayNormalValue[immutable.Option[T]]
+}
+
+func (v baseNillableArrayNormalValue[T]) Unwrap() any {
+ if v.val.HasValue() {
+ return v.val.Value()
+ }
+ return nil
+}
+
+func (v baseNillableArrayNormalValue[T]) IsNil() bool {
+ return !v.val.HasValue()
+}
+
+func (v baseNillableArrayNormalValue[T]) IsNillable() bool {
+ return true
+}
+
+func (v baseNillableArrayNormalValue[T]) IsArray() bool {
+ return true
+}
+
+func newBaseNillableArrayNormalValue[T any](val immutable.Option[T]) baseNillableArrayNormalValue[T] {
+ return baseNillableArrayNormalValue[T]{newBaseArrayNormalValue(val)}
+}
+
+type normalBoolNillableArray struct {
+ baseNillableArrayNormalValue[[]bool]
+}
+
+func (v normalBoolNillableArray) BoolNillableArray() (immutable.Option[[]bool], bool) {
+ return v.val, true
+}
+
+type normalIntNillableArray struct {
+ baseNillableArrayNormalValue[[]int64]
+}
+
+func (v normalIntNillableArray) IntNillableArray() (immutable.Option[[]int64], bool) {
+ return v.val, true
+}
+
+type normalFloatNillableArray struct {
+ baseNillableArrayNormalValue[[]float64]
+}
+
+func (v normalFloatNillableArray) FloatNillableArray() (immutable.Option[[]float64], bool) {
+ return v.val, true
+}
+
+type normalStringNillableArray struct {
+ baseNillableArrayNormalValue[[]string]
+}
+
+func (v normalStringNillableArray) StringNillableArray() (immutable.Option[[]string], bool) {
+ return v.val, true
+}
+
+type normalBytesNillableArray struct {
+ baseNillableArrayNormalValue[[][]byte]
+}
+
+func (v normalBytesNillableArray) BytesNillableArray() (immutable.Option[[][]byte], bool) {
+ return v.val, true
+}
+
+type normalTimeNillableArray struct {
+ baseNillableArrayNormalValue[[]time.Time]
+}
+
+func (v normalTimeNillableArray) TimeNillableArray() (immutable.Option[[]time.Time], bool) {
+ return v.val, true
+}
+
+type normalDocumentNillableArray struct {
+ baseNillableArrayNormalValue[[]*Document]
+}
+
+func (v normalDocumentNillableArray) DocumentNillableArray() (immutable.Option[[]*Document], bool) {
+ return v.val, true
+}
+
+// NewNormalBoolNillableArray creates a new NormalValue that represents a `immutable.Option[[]bool]` value.
+func NewNormalBoolNillableArray(val immutable.Option[[]bool]) NormalValue {
+ return normalBoolNillableArray{newBaseNillableArrayNormalValue(val)}
+}
+
+// NewNormalIntNillableArray creates a new NormalValue that represents a `immutable.Option[[]int64]` value.
+func NewNormalIntNillableArray[T constraints.Integer | constraints.Float](val immutable.Option[[]T]) NormalValue {
+ return normalIntNillableArray{newBaseNillableArrayNormalValue(normalizeNumNillableArr[int64](val))}
+}
+
+// NewNormalFloatNillableArray creates a new NormalValue that represents a `immutable.Option[[]float64]` value.
+func NewNormalFloatNillableArray[T constraints.Integer | constraints.Float](val immutable.Option[[]T]) NormalValue {
+ return normalFloatNillableArray{newBaseNillableArrayNormalValue(normalizeNumNillableArr[float64](val))}
+}
+
+// NewNormalStringNillableArray creates a new NormalValue that represents a `immutable.Option[[]string]` value.
+func NewNormalStringNillableArray[T string | []byte](val immutable.Option[[]T]) NormalValue {
+ return normalStringNillableArray{newBaseNillableArrayNormalValue(normalizeCharsNillableArr[string](val))}
+}
+
+// NewNormalBytesNillableArray creates a new NormalValue that represents a `immutable.Option[[][]byte]` value.
+func NewNormalBytesNillableArray[T string | []byte](val immutable.Option[[]T]) NormalValue {
+ return normalBytesNillableArray{newBaseNillableArrayNormalValue(normalizeCharsNillableArr[[]byte](val))}
+}
+
+// NewNormalTimeNillableArray creates a new NormalValue that represents a `immutable.Option[[]time.Time]` value.
+func NewNormalTimeNillableArray(val immutable.Option[[]time.Time]) NormalValue {
+ return normalTimeNillableArray{newBaseNillableArrayNormalValue(val)}
+}
+
+// NewNormalDocumentNillableArray creates a new NormalValue that represents a `immutable.Option[[]*Document]` value.
+func NewNormalDocumentNillableArray(val immutable.Option[[]*Document]) NormalValue {
+ return normalDocumentNillableArray{newBaseNillableArrayNormalValue(val)}
+}
+
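+// normalizeNumNillableArr converts an optional numeric slice to the target
+// element type R, preserving None.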
+func normalizeNumNillableArr[R int64 | float64, T constraints.Integer | constraints.Float](
+ val immutable.Option[[]T],
+) immutable.Option[[]R] {
+ if val.HasValue() {
+ return immutable.Some(normalizeNumArr[R](val.Value()))
+ }
+ return immutable.None[[]R]()
+}
+
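+// normalizeCharsNillableArr converts an optional slice of strings or byte
+// slices to the target element type R, preserving None.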
+func normalizeCharsNillableArr[R string | []byte, T string | []byte](val immutable.Option[[]T]) immutable.Option[[]R] {
+ if val.HasValue() {
+ return immutable.Some(normalizeCharsArr[R](val.Value()))
+ }
+ return immutable.None[[]R]()
+}
diff --git a/client/normal_nillable_array_of_nillables.go b/client/normal_nillable_array_of_nillables.go
new file mode 100644
index 0000000000..3594186ba2
--- /dev/null
+++ b/client/normal_nillable_array_of_nillables.go
@@ -0,0 +1,160 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package client
+
+import (
+ "time"
+
+ "github.com/sourcenetwork/immutable"
+ "golang.org/x/exp/constraints"
+)
+
+type normalNillableBoolNillableArray struct {
+ baseNillableArrayNormalValue[[]immutable.Option[bool]]
+}
+
+func (v normalNillableBoolNillableArray) NillableBoolNillableArray() (
+ immutable.Option[[]immutable.Option[bool]], bool,
+) {
+ return v.val, true
+}
+
+type normalNillableIntNillableArray struct {
+ baseNillableArrayNormalValue[[]immutable.Option[int64]]
+}
+
+func (v normalNillableIntNillableArray) NillableIntNillableArray() (
+ immutable.Option[[]immutable.Option[int64]], bool,
+) {
+ return v.val, true
+}
+
+type normalNillableFloatNillableArray struct {
+ baseNillableArrayNormalValue[[]immutable.Option[float64]]
+}
+
+func (v normalNillableFloatNillableArray) NillableFloatNillableArray() (
+ immutable.Option[[]immutable.Option[float64]], bool,
+) {
+ return v.val, true
+}
+
+type normalNillableStringNillableArray struct {
+ baseNillableArrayNormalValue[[]immutable.Option[string]]
+}
+
+func (v normalNillableStringNillableArray) NillableStringNillableArray() (
+ immutable.Option[[]immutable.Option[string]], bool,
+) {
+ return v.val, true
+}
+
+type normalNillableBytesNillableArray struct {
+ baseNillableArrayNormalValue[[]immutable.Option[[]byte]]
+}
+
+func (v normalNillableBytesNillableArray) NillableBytesNillableArray() (
+ immutable.Option[[]immutable.Option[[]byte]], bool,
+) {
+ return v.val, true
+}
+
+type normalNillableTimeNillableArray struct {
+ baseNillableArrayNormalValue[[]immutable.Option[time.Time]]
+}
+
+func (v normalNillableTimeNillableArray) NillableTimeNillableArray() (
+ immutable.Option[[]immutable.Option[time.Time]], bool,
+) {
+ return v.val, true
+}
+
+type normalNillableDocumentNillableArray struct {
+ baseNillableArrayNormalValue[[]immutable.Option[*Document]]
+}
+
+func (v normalNillableDocumentNillableArray) NillableDocumentNillableArray() (
+ immutable.Option[[]immutable.Option[*Document]], bool,
+) {
+ return v.val, true
+}
+
+// NewNormalNillableBoolNillableArray creates a new NormalValue that represents a
+// `immutable.Option[[]immutable.Option[bool]]` value.
+func NewNormalNillableBoolNillableArray(val immutable.Option[[]immutable.Option[bool]]) NormalValue {
+ return normalNillableBoolNillableArray{newBaseNillableArrayNormalValue(val)}
+}
+
+// NewNormalNillableIntNillableArray creates a new NormalValue that represents a
+// `immutable.Option[[]immutable.Option[int64]]` value.
+func NewNormalNillableIntNillableArray[T constraints.Integer | constraints.Float](
+ val immutable.Option[[]immutable.Option[T]],
+) NormalValue {
+ return normalNillableIntNillableArray{
+ newBaseNillableArrayNormalValue(normalizeNillableNumNillableArr[int64](val)),
+ }
+}
+
+// NewNormalNillableFloatNillableArray creates a new NormalValue that represents an
+// `immutable.Option[[]immutable.Option[float64]]` value.
+func NewNormalNillableFloatNillableArray[T constraints.Integer | constraints.Float](
+ val immutable.Option[[]immutable.Option[T]],
+) NormalValue {
+ return normalNillableFloatNillableArray{
+ newBaseNillableArrayNormalValue(normalizeNillableNumNillableArr[float64](val)),
+ }
+}
+
+// NewNormalNillableStringNillableArray creates a new NormalValue that represents an
+// `immutable.Option[[]immutable.Option[string]]` value.
+func NewNormalNillableStringNillableArray[T string | []byte](val immutable.Option[[]immutable.Option[T]]) NormalValue {
+ return normalNillableStringNillableArray{
+ newBaseNillableArrayNormalValue(normalizeNillableCharsNillableArr[string](val)),
+ }
+}
+
+// NewNormalNillableBytesNillableArray creates a new NormalValue that represents an
+// `immutable.Option[[]immutable.Option[[]byte]]` value.
+func NewNormalNillableBytesNillableArray[T string | []byte](val immutable.Option[[]immutable.Option[T]]) NormalValue {
+ return normalNillableBytesNillableArray{
+ newBaseNillableArrayNormalValue(normalizeNillableCharsNillableArr[[]byte](val)),
+ }
+}
+
+// NewNormalNillableTimeNillableArray creates a new NormalValue that represents an
+// `immutable.Option[[]immutable.Option[time.Time]]` value.
+func NewNormalNillableTimeNillableArray(val immutable.Option[[]immutable.Option[time.Time]]) NormalValue {
+ return normalNillableTimeNillableArray{newBaseNillableArrayNormalValue(val)}
+}
+
+// NewNormalNillableDocumentNillableArray creates a new NormalValue that represents an
+// `immutable.Option[[]immutable.Option[*Document]]` value.
+func NewNormalNillableDocumentNillableArray(val immutable.Option[[]immutable.Option[*Document]]) NormalValue {
+ return normalNillableDocumentNillableArray{newBaseNillableArrayNormalValue(val)}
+}
+
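+// normalizeNillableNumNillableArr converts a nillable array of nillable numeric elements
+// into one whose elements are of the target numeric type R, preserving None.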
+func normalizeNillableNumNillableArr[R int64 | float64, T constraints.Integer | constraints.Float](
+ val immutable.Option[[]immutable.Option[T]],
+) immutable.Option[[]immutable.Option[R]] {
+ if val.HasValue() {
+ return immutable.Some(normalizeNillableNumArr[R](val.Value()))
+ }
+ return immutable.None[[]immutable.Option[R]]()
+}
+
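+// normalizeNillableCharsNillableArr converts a nillable array of nillable string/byte-slice
+// elements into one whose elements are of the target type R, preserving None.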
+func normalizeNillableCharsNillableArr[R string | []byte, T string | []byte](
+ val immutable.Option[[]immutable.Option[T]],
+) immutable.Option[[]immutable.Option[R]] {
+ if val.HasValue() {
+ return immutable.Some(normalizeNillableCharsArr[R](val.Value()))
+ }
+ return immutable.None[[]immutable.Option[R]]()
+}
diff --git a/client/normal_nillable_scalar.go b/client/normal_nillable_scalar.go
new file mode 100644
index 0000000000..88876c9d7e
--- /dev/null
+++ b/client/normal_nillable_scalar.go
@@ -0,0 +1,148 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package client
+
+import (
+ "time"
+
+ "github.com/sourcenetwork/immutable"
+ "golang.org/x/exp/constraints"
+)
+
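+// baseNillableNormalValue is the base implementation for nillable normal values: it stores
+// the value as an [immutable.Option[T]], reports IsNillable as true, and unwraps None to nil.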
+type baseNillableNormalValue[T any] struct {
+ baseNormalValue[immutable.Option[T]]
+}
+
+func (v baseNillableNormalValue[T]) Unwrap() any {
+ if v.val.HasValue() {
+ return v.val.Value()
+ }
+ return nil
+}
+
+func (v baseNillableNormalValue[T]) IsNil() bool {
+ return !v.val.HasValue()
+}
+
+func (v baseNillableNormalValue[T]) IsNillable() bool {
+ return true
+}
+
+func newBaseNillableNormalValue[T any](val immutable.Option[T]) baseNillableNormalValue[T] {
+ return baseNillableNormalValue[T]{newBaseNormalValue(val)}
+}
+
+type normalNillableBool struct {
+ baseNillableNormalValue[bool]
+}
+
+func (v normalNillableBool) NillableBool() (immutable.Option[bool], bool) {
+ return v.val, true
+}
+
+type normalNillableInt struct {
+ baseNillableNormalValue[int64]
+}
+
+func (v normalNillableInt) NillableInt() (immutable.Option[int64], bool) {
+ return v.val, true
+}
+
+type normalNillableFloat struct {
+ baseNillableNormalValue[float64]
+}
+
+func (v normalNillableFloat) NillableFloat() (immutable.Option[float64], bool) {
+ return v.val, true
+}
+
+type normalNillableString struct {
+ baseNillableNormalValue[string]
+}
+
+func (v normalNillableString) NillableString() (immutable.Option[string], bool) {
+ return v.val, true
+}
+
+type normalNillableBytes struct {
+ baseNillableNormalValue[[]byte]
+}
+
+func (v normalNillableBytes) NillableBytes() (immutable.Option[[]byte], bool) {
+ return v.val, true
+}
+
+type normalNillableTime struct {
+ baseNillableNormalValue[time.Time]
+}
+
+func (v normalNillableTime) NillableTime() (immutable.Option[time.Time], bool) {
+ return v.val, true
+}
+
+type normalNillableDocument struct {
+ baseNillableNormalValue[*Document]
+}
+
+func (v normalNillableDocument) NillableDocument() (immutable.Option[*Document], bool) {
+ return v.val, true
+}
+
+// NewNormalNillableBool creates a new NormalValue that represents an `immutable.Option[bool]` value.
+func NewNormalNillableBool(val immutable.Option[bool]) NormalValue {
+ return normalNillableBool{newBaseNillableNormalValue(val)}
+}
+
+// NewNormalNillableInt creates a new NormalValue that represents an `immutable.Option[int64]` value.
+func NewNormalNillableInt[T constraints.Integer | constraints.Float](val immutable.Option[T]) NormalValue {
+ return normalNillableInt{newBaseNillableNormalValue(normalizeNillableNum[int64](val))}
+}
+
+// NewNormalNillableFloat creates a new NormalValue that represents an `immutable.Option[float64]` value.
+func NewNormalNillableFloat[T constraints.Integer | constraints.Float](val immutable.Option[T]) NormalValue {
+ return normalNillableFloat{newBaseNillableNormalValue(normalizeNillableNum[float64](val))}
+}
+
+// NewNormalNillableString creates a new NormalValue that represents an `immutable.Option[string]` value.
+func NewNormalNillableString[T string | []byte](val immutable.Option[T]) NormalValue {
+ return normalNillableString{newBaseNillableNormalValue(normalizeNillableChars[string](val))}
+}
+
+// NewNormalNillableBytes creates a new NormalValue that represents an `immutable.Option[[]byte]` value.
+func NewNormalNillableBytes[T string | []byte](val immutable.Option[T]) NormalValue {
+ return normalNillableBytes{newBaseNillableNormalValue(normalizeNillableChars[[]byte](val))}
+}
+
+// NewNormalNillableTime creates a new NormalValue that represents an `immutable.Option[time.Time]` value.
+func NewNormalNillableTime(val immutable.Option[time.Time]) NormalValue {
+ return normalNillableTime{newBaseNillableNormalValue(val)}
+}
+
+// NewNormalNillableDocument creates a new NormalValue that represents an `immutable.Option[*Document]` value.
+func NewNormalNillableDocument(val immutable.Option[*Document]) NormalValue {
+ return normalNillableDocument{newBaseNillableNormalValue(val)}
+}
+
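+// normalizeNillableNum converts an option of any integer or float type T into an option
+// of the target numeric type R, preserving None.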
+func normalizeNillableNum[R int64 | float64, T constraints.Integer | constraints.Float](
+ val immutable.Option[T],
+) immutable.Option[R] {
+ if val.HasValue() {
+ return immutable.Some(R(val.Value()))
+ }
+ return immutable.None[R]()
+}
+
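+// normalizeNillableChars converts an option of a string or byte slice into an option
+// of the target type R, preserving None.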
+func normalizeNillableChars[R string | []byte, T string | []byte](val immutable.Option[T]) immutable.Option[R] {
+ if val.HasValue() {
+ return immutable.Some(R(val.Value()))
+ }
+ return immutable.None[R]()
+}
diff --git a/client/normal_scalar.go b/client/normal_scalar.go
new file mode 100644
index 0000000000..f4378f5474
--- /dev/null
+++ b/client/normal_scalar.go
@@ -0,0 +1,130 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package client
+
+import (
+ "time"
+
+ "golang.org/x/exp/constraints"
+)
+
+// baseNormalValue is a base implementation of NormalValue, meant to be embedded in the concrete normal value types.
+type baseNormalValue[T any] struct {
+ NormalVoid
+ val T
+}
+
+func (v baseNormalValue[T]) Unwrap() any {
+ return v.val
+}
+
+func newBaseNormalValue[T any](val T) baseNormalValue[T] {
+ return baseNormalValue[T]{val: val}
+}
+
+type normalBool struct {
+ baseNormalValue[bool]
+}
+
+func (v normalBool) Bool() (bool, bool) {
+ return v.val, true
+}
+
+type normalInt struct {
+ baseNormalValue[int64]
+}
+
+func (v normalInt) Int() (int64, bool) {
+ return v.val, true
+}
+
+type normalFloat struct {
+ baseNormalValue[float64]
+}
+
+func (v normalFloat) Float() (float64, bool) {
+ return v.val, true
+}
+
+type normalString struct {
+ baseNormalValue[string]
+}
+
+func (v normalString) String() (string, bool) {
+ return v.val, true
+}
+
+type normalBytes struct {
+ baseNormalValue[[]byte]
+}
+
+func (v normalBytes) Bytes() ([]byte, bool) {
+ return v.val, true
+}
+
+type normalTime struct {
+ baseNormalValue[time.Time]
+}
+
+func (v normalTime) Time() (time.Time, bool) {
+ return v.val, true
+}
+
+type normalDocument struct {
+ baseNormalValue[*Document]
+}
+
+func (v normalDocument) Document() (*Document, bool) {
+ return v.val, true
+}
+
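+// newNormalInt and newNormalFloat are unexported shortcuts for values that are
+// already normalized to int64/float64.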
+func newNormalInt(val int64) NormalValue {
+ return normalInt{newBaseNormalValue(val)}
+}
+
+func newNormalFloat(val float64) NormalValue {
+ return normalFloat{newBaseNormalValue(val)}
+}
+
+// NewNormalBool creates a new NormalValue that represents a `bool` value.
+func NewNormalBool(val bool) NormalValue {
+ return normalBool{baseNormalValue[bool]{val: val}}
+}
+
+// NewNormalInt creates a new NormalValue that represents an `int64` value.
+func NewNormalInt[T constraints.Integer | constraints.Float](val T) NormalValue {
+ return normalInt{baseNormalValue[int64]{val: int64(val)}}
+}
+
+// NewNormalFloat creates a new NormalValue that represents a `float64` value.
+func NewNormalFloat[T constraints.Integer | constraints.Float](val T) NormalValue {
+ return normalFloat{baseNormalValue[float64]{val: float64(val)}}
+}
+
+// NewNormalString creates a new NormalValue that represents a `string` value.
+func NewNormalString[T string | []byte](val T) NormalValue {
+ return normalString{baseNormalValue[string]{val: string(val)}}
+}
+
+// NewNormalBytes creates a new NormalValue that represents a `[]byte` value.
+func NewNormalBytes[T string | []byte](val T) NormalValue {
+ return normalBytes{baseNormalValue[[]byte]{val: []byte(val)}}
+}
+
+// NewNormalTime creates a new NormalValue that represents a `time.Time` value.
+func NewNormalTime(val time.Time) NormalValue {
+ return normalTime{baseNormalValue[time.Time]{val: val}}
+}
+
+// NewNormalDocument creates a new NormalValue that represents a `*Document` value.
+func NewNormalDocument(val *Document) NormalValue {
+ return normalDocument{baseNormalValue[*Document]{val: val}}
+}
diff --git a/client/normal_util.go b/client/normal_util.go
new file mode 100644
index 0000000000..87310d9631
--- /dev/null
+++ b/client/normal_util.go
@@ -0,0 +1,118 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package client
+
+// ToArrayOfNormalValues converts a NormalValue into a slice of NormalValue if the given value
+// is an array. If the given value is not an array, an error is returned.
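+//
+// For example, a minimal sketch of the expected behaviour:
+//
+//	vals, _ := ToArrayOfNormalValues(NewNormalIntArray([]int64{1, 2}))
+//	// vals is equivalent to []NormalValue{NewNormalInt(int64(1)), NewNormalInt(int64(2))}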
+func ToArrayOfNormalValues(val NormalValue) ([]NormalValue, error) {
+ if !val.IsArray() {
+ return nil, NewCanNotTurnNormalValueIntoArray(val)
+ }
+ if !val.IsNillable() {
+ if v, ok := val.BoolArray(); ok {
+ return toNormalArray(v, NewNormalBool), nil
+ }
+ if v, ok := val.IntArray(); ok {
+ return toNormalArray(v, NewNormalInt), nil
+ }
+ if v, ok := val.FloatArray(); ok {
+ return toNormalArray(v, NewNormalFloat), nil
+ }
+ if v, ok := val.StringArray(); ok {
+ return toNormalArray(v, NewNormalString), nil
+ }
+ if v, ok := val.BytesArray(); ok {
+ return toNormalArray(v, NewNormalBytes), nil
+ }
+ if v, ok := val.TimeArray(); ok {
+ return toNormalArray(v, NewNormalTime), nil
+ }
+ if v, ok := val.DocumentArray(); ok {
+ return toNormalArray(v, NewNormalDocument), nil
+ }
+ if v, ok := val.NillableBoolArray(); ok {
+ return toNormalArray(v, NewNormalNillableBool), nil
+ }
+ if v, ok := val.NillableIntArray(); ok {
+ return toNormalArray(v, NewNormalNillableInt), nil
+ }
+ if v, ok := val.NillableFloatArray(); ok {
+ return toNormalArray(v, NewNormalNillableFloat), nil
+ }
+ if v, ok := val.NillableStringArray(); ok {
+ return toNormalArray(v, NewNormalNillableString), nil
+ }
+ if v, ok := val.NillableBytesArray(); ok {
+ return toNormalArray(v, NewNormalNillableBytes), nil
+ }
+ if v, ok := val.NillableTimeArray(); ok {
+ return toNormalArray(v, NewNormalNillableTime), nil
+ }
+ if v, ok := val.NillableDocumentArray(); ok {
+ return toNormalArray(v, NewNormalNillableDocument), nil
+ }
+ } else {
+ if val.IsNil() {
+ return nil, nil
+ }
+ if v, ok := val.NillableBoolNillableArray(); ok {
+ return toNormalArray(v.Value(), NewNormalNillableBool), nil
+ }
+ if v, ok := val.NillableIntNillableArray(); ok {
+ return toNormalArray(v.Value(), NewNormalNillableInt), nil
+ }
+ if v, ok := val.NillableFloatNillableArray(); ok {
+ return toNormalArray(v.Value(), NewNormalNillableFloat), nil
+ }
+ if v, ok := val.NillableStringNillableArray(); ok {
+ return toNormalArray(v.Value(), NewNormalNillableString), nil
+ }
+ if v, ok := val.NillableBytesNillableArray(); ok {
+ return toNormalArray(v.Value(), NewNormalNillableBytes), nil
+ }
+ if v, ok := val.NillableTimeNillableArray(); ok {
+ return toNormalArray(v.Value(), NewNormalNillableTime), nil
+ }
+ if v, ok := val.NillableDocumentNillableArray(); ok {
+ return toNormalArray(v.Value(), NewNormalNillableDocument), nil
+ }
+ if v, ok := val.BoolNillableArray(); ok {
+ return toNormalArray(v.Value(), NewNormalBool), nil
+ }
+ if v, ok := val.IntNillableArray(); ok {
+ return toNormalArray(v.Value(), NewNormalInt), nil
+ }
+ if v, ok := val.FloatNillableArray(); ok {
+ return toNormalArray(v.Value(), NewNormalFloat), nil
+ }
+ if v, ok := val.StringNillableArray(); ok {
+ return toNormalArray(v.Value(), NewNormalString), nil
+ }
+ if v, ok := val.BytesNillableArray(); ok {
+ return toNormalArray(v.Value(), NewNormalBytes), nil
+ }
+ if v, ok := val.TimeNillableArray(); ok {
+ return toNormalArray(v.Value(), NewNormalTime), nil
+ }
+ if v, ok := val.DocumentNillableArray(); ok {
+ return toNormalArray(v.Value(), NewNormalDocument), nil
+ }
+ }
+ return nil, NewCanNotTurnNormalValueIntoArray(val)
+}
+
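+// toNormalArray maps every element of val through the constructor f,
+// returning the resulting []NormalValue of the same length.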
+func toNormalArray[T any](val []T, f func(T) NormalValue) []NormalValue {
+ res := make([]NormalValue, len(val))
+ for i := range val {
+ res[i] = f(val[i])
+ }
+ return res
+}
diff --git a/client/normal_value.go b/client/normal_value.go
new file mode 100644
index 0000000000..3f0681fbfc
--- /dev/null
+++ b/client/normal_value.go
@@ -0,0 +1,207 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package client
+
+import (
+ "time"
+
+ "github.com/sourcenetwork/immutable"
+)
+
+// NormalValue is the interface for the normal value types.
+// It is used to represent the normal (or standard) values across the system and to avoid
+// asserting all possible types like int, int32, int64, etc.
+//
+// All methods returning a specific type return the value together with a second boolean flag
+// indicating whether the value is of the requested type. They act similarly to Go's type assertions.
+//
+// All nillable values are represented as [immutable.Option[T]].
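+//
+// For example, a minimal sketch using the constructors in this package:
+//
+//	v := NewNormalInt(5)
+//	i, ok := v.Int()   // i == int64(5), ok == true
+//	_, ok = v.Float()  // ok == false: v does not hold a float64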
+type NormalValue interface {
+ // Unwrap returns the underlying value.
+ // For non-nillable values it will return the value as is.
+ // For nillable values (of type [immutable.Option[T]]) it will return the contained value
+ // if the option has a value; otherwise it will return nil.
+ Unwrap() any
+
+ // IsNil returns whether the value is nil. For non-nillable values it will always return false.
+ IsNil() bool
+ // IsNillable returns whether the value can be nil.
+ IsNillable() bool
+ // IsArray returns whether the value is an array.
+ IsArray() bool
+
+ // Bool returns the value as a bool. The second return flag is true if the value is a bool.
+ // Otherwise it will return false and false.
+ Bool() (bool, bool)
+ // Int returns the value as an int64. The second return flag is true if the value is an int64.
+ // Otherwise it will return 0 and false.
+ Int() (int64, bool)
+ // Float returns the value as a float64. The second return flag is true if the value is a float64.
+ // Otherwise it will return 0 and false.
+ Float() (float64, bool)
+ // String returns the value as a string. The second return flag is true if the value is a string.
+ // Otherwise it will return "" and false.
+ String() (string, bool)
+ // Bytes returns the value as a []byte. The second return flag is true if the value is a []byte.
+ // Otherwise it will return nil and false.
+ Bytes() ([]byte, bool)
+ // Time returns the value as a [time.Time]. The second return flag is true if the value is a [time.Time].
+ // Otherwise it will return the zero [time.Time] and false.
+ Time() (time.Time, bool)
+ // Document returns the value as a [*Document]. The second return flag is true if the value is a [*Document].
+ // Otherwise it will return nil and false.
+ Document() (*Document, bool)
+
+ // NillableBool returns the value as a nillable bool.
+ // The second return flag is true if the value is [immutable.Option[bool]].
+ // Otherwise it will return [immutable.None[bool]()] and false.
+ NillableBool() (immutable.Option[bool], bool)
+ // NillableInt returns the value as a nillable int64.
+ // The second return flag is true if the value is [immutable.Option[int64]].
+ // Otherwise it will return [immutable.None[int64]()] and false.
+ NillableInt() (immutable.Option[int64], bool)
+ // NillableFloat returns the value as a nillable float64.
+ // The second return flag is true if the value is [immutable.Option[float64]].
+ // Otherwise it will return [immutable.None[float64]()] and false.
+ NillableFloat() (immutable.Option[float64], bool)
+ // NillableString returns the value as a nillable string.
+ // The second return flag is true if the value is [immutable.Option[string]].
+ // Otherwise it will return [immutable.None[string]()] and false.
+ NillableString() (immutable.Option[string], bool)
+ // NillableBytes returns the value as a nillable byte slice.
+ // The second return flag is true if the value is [immutable.Option[[]byte]].
+ // Otherwise it will return [immutable.None[[]byte]()] and false.
+ NillableBytes() (immutable.Option[[]byte], bool)
+ // NillableTime returns the value as a nillable time.Time.
+ // The second return flag is true if the value is [immutable.Option[time.Time]].
+ // Otherwise it will return [immutable.None[time.Time]()] and false.
+ NillableTime() (immutable.Option[time.Time], bool)
+ // NillableDocument returns the value as a nillable *Document.
+ // The second return flag is true if the value is [immutable.Option[*Document]].
+ // Otherwise it will return [immutable.None[*Document]()] and false.
+ NillableDocument() (immutable.Option[*Document], bool)
+
+ // BoolArray returns the value as a bool array.
+ // The second return flag is true if the value is a []bool.
+ // Otherwise it will return nil and false.
+ BoolArray() ([]bool, bool)
+ // IntArray returns the value as an int64 array.
+ // The second return flag is true if the value is a []int64.
+ // Otherwise it will return nil and false.
+ IntArray() ([]int64, bool)
+ // FloatArray returns the value as a float64 array.
+ // The second return flag is true if the value is a []float64.
+ // Otherwise it will return nil and false.
+ FloatArray() ([]float64, bool)
+ // StringArray returns the value as a string array.
+ // The second return flag is true if the value is a []string.
+ // Otherwise it will return nil and false.
+ StringArray() ([]string, bool)
+ // BytesArray returns the value as a byte slice array.
+ // The second return flag is true if the value is a [][]byte.
+ // Otherwise it will return nil and false.
+ BytesArray() ([][]byte, bool)
+ // TimeArray returns the value as a time.Time array.
+ // The second return flag is true if the value is a []time.Time.
+ // Otherwise it will return nil and false.
+ TimeArray() ([]time.Time, bool)
+ // DocumentArray returns the value as a [*Document] array.
+ // The second return flag is true if the value is a []*Document.
+ // Otherwise it will return nil and false.
+ DocumentArray() ([]*Document, bool)
+
+ // BoolNillableArray returns the value as a nillable array of bool elements.
+ // The second return flag is true if the value is [immutable.Option[[]bool]].
+ // Otherwise it will return [immutable.None[[]bool]()] and false.
+ BoolNillableArray() (immutable.Option[[]bool], bool)
+ // IntNillableArray returns the value as a nillable array of int64 elements.
+ // The second return flag is true if the value is [immutable.Option[[]int64]].
+ // Otherwise it will return [immutable.None[[]int64]()] and false.
+ IntNillableArray() (immutable.Option[[]int64], bool)
+ // FloatNillableArray returns the value as a nillable array of float64 elements.
+ // The second return flag is true if the value is [immutable.Option[[]float64]].
+ // Otherwise it will return [immutable.None[[]float64]()] and false.
+ FloatNillableArray() (immutable.Option[[]float64], bool)
+ // StringNillableArray returns the value as a nillable array of string elements.
+ // The second return flag is true if the value is [immutable.Option[[]string]].
+ // Otherwise it will return [immutable.None[[]string]()] and false.
+ StringNillableArray() (immutable.Option[[]string], bool)
+ // BytesNillableArray returns the value as a nillable array of byte slice elements.
+ // The second return flag is true if the value is [immutable.Option[[][]byte]].
+ // Otherwise it will return [immutable.None[[][]byte]()] and false.
+ BytesNillableArray() (immutable.Option[[][]byte], bool)
+ // TimeNillableArray returns the value as a nillable array of [time.Time] elements.
+ // The second return flag is true if the value is [immutable.Option[[]time.Time]].
+ // Otherwise it will return [immutable.None[[]time.Time]()] and false.
+ TimeNillableArray() (immutable.Option[[]time.Time], bool)
+ // DocumentNillableArray returns the value as a nillable array of [*Document] elements.
+ // The second return flag is true if the value is [immutable.Option[[]*Document]].
+ // Otherwise it will return [immutable.None[[]*Document]()] and false.
+ DocumentNillableArray() (immutable.Option[[]*Document], bool)
+
+ // NillableBoolArray returns the value as an array of nillable bool elements.
+ // The second return flag is true if the value is []immutable.Option[bool].
+ // Otherwise it will return nil and false.
+ NillableBoolArray() ([]immutable.Option[bool], bool)
+ // NillableIntArray returns the value as an array of nillable int64 elements.
+ // The second return flag is true if the value is []immutable.Option[int64].
+ // Otherwise it will return nil and false.
+ NillableIntArray() ([]immutable.Option[int64], bool)
+ // NillableFloatArray returns the value as an array of nillable float64 elements.
+ // The second return flag is true if the value is []immutable.Option[float64].
+ // Otherwise it will return nil and false.
+ NillableFloatArray() ([]immutable.Option[float64], bool)
+ // NillableStringArray returns the value as an array of nillable string elements.
+ // The second return flag is true if the value is []immutable.Option[string].
+ // Otherwise it will return nil and false.
+ NillableStringArray() ([]immutable.Option[string], bool)
+ // NillableBytesArray returns the value as an array of nillable byte slice elements.
+ // The second return flag is true if the value is []immutable.Option[[]byte].
+ // Otherwise it will return nil and false.
+ NillableBytesArray() ([]immutable.Option[[]byte], bool)
+ // NillableTimeArray returns the value as an array of nillable time.Time elements.
+ // The second return flag is true if the value is []immutable.Option[time.Time].
+ // Otherwise it will return nil and false.
+ NillableTimeArray() ([]immutable.Option[time.Time], bool)
+ // NillableDocumentArray returns the value as an array of nillable *Document elements.
+ // The second return flag is true if the value is []immutable.Option[*Document].
+ // Otherwise it will return nil and false.
+ NillableDocumentArray() ([]immutable.Option[*Document], bool)
+
+ // NillableBoolNillableArray returns the value as a nillable array of nillable bool elements.
+ // The second return flag is true if the value is [immutable.Option[[]immutable.Option[bool]]].
+ // Otherwise it will return [immutable.None[[]immutable.Option[bool]]()] and false.
+ NillableBoolNillableArray() (immutable.Option[[]immutable.Option[bool]], bool)
+ // NillableIntNillableArray returns the value as a nillable array of nillable int64 elements.
+ // The second return flag is true if the value is [immutable.Option[[]immutable.Option[int64]]].
+ // Otherwise it will return [immutable.None[[]immutable.Option[int64]]()] and false.
+ NillableIntNillableArray() (immutable.Option[[]immutable.Option[int64]], bool)
+ // NillableFloatNillableArray returns the value as a nillable array of nillable float64 elements.
+ // The second return flag is true if the value is [immutable.Option[[]immutable.Option[float64]]].
+ // Otherwise it will return [immutable.None[[]immutable.Option[float64]]()] and false.
+ NillableFloatNillableArray() (immutable.Option[[]immutable.Option[float64]], bool)
+ // NillableStringNillableArray returns the value as a nillable array of nillable string elements.
+ // The second return flag is true if the value is [immutable.Option[[]immutable.Option[string]]].
+ // Otherwise it will return [immutable.None[[]immutable.Option[string]]()] and false.
+ NillableStringNillableArray() (immutable.Option[[]immutable.Option[string]], bool)
+ // NillableBytesNillableArray returns the value as a nillable array of nillable byte slice elements.
+ // The second return flag is true if the value is [immutable.Option[[]immutable.Option[[]byte]]].
+ // Otherwise it will return [immutable.None[[]immutable.Option[[]byte]]()] and false.
+ NillableBytesNillableArray() (immutable.Option[[]immutable.Option[[]byte]], bool)
+ // NillableTimeNillableArray returns the value as a nillable array of nillable time.Time elements.
+ // The second return flag is true if the value is [immutable.Option[[]immutable.Option[time.Time]]].
+ // Otherwise it will return [immutable.None[[]immutable.Option[time.Time]]()] and false.
+ NillableTimeNillableArray() (immutable.Option[[]immutable.Option[time.Time]], bool)
+ // NillableDocumentNillableArray returns the value as a nillable array of nillable *Document elements.
+ // The second return flag is true if the value is [immutable.Option[[]immutable.Option[*Document]]].
+ // Otherwise it will return [immutable.None[[]immutable.Option[*Document]]()] and false.
+ NillableDocumentNillableArray() (immutable.Option[[]immutable.Option[*Document]], bool)
+}
diff --git a/client/normal_value_test.go b/client/normal_value_test.go
new file mode 100644
index 0000000000..33cd20c46e
--- /dev/null
+++ b/client/normal_value_test.go
@@ -0,0 +1,1649 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package client
+
+import (
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/sourcenetwork/immutable"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+type nType string
+
+const (
+ BoolType nType = "Bool"
+ IntType nType = "Int"
+ FloatType nType = "Float"
+ StringType nType = "String"
+ BytesType nType = "Bytes"
+ TimeType nType = "Time"
+ DocumentType nType = "Document"
+
+ NillableBoolType nType = "NillableBool"
+ NillableIntType nType = "NillableInt"
+ NillableFloatType nType = "NillableFloat"
+ NillableStringType nType = "NillableString"
+ NillableBytesType nType = "NillableBytes"
+ NillableTimeType nType = "NillableTime"
+ NillableDocumentType nType = "NillableDocument"
+
+ BoolArray nType = "BoolArray"
+ IntArray nType = "IntArray"
+ FloatArray nType = "FloatArray"
+ StringArray nType = "StringArray"
+ BytesArray nType = "BytesArray"
+ TimeArray nType = "TimeArray"
+ DocumentArray nType = "DocumentArray"
+
+ NillableBoolArray nType = "NillableBoolArray"
+ NillableIntArray nType = "NillableIntArray"
+ NillableFloatArray nType = "NillableFloatArray"
+ NillableStringArray nType = "NillableStringArray"
+ NillableBytesArray nType = "NillableBytesArray"
+ NillableTimeArray nType = "NillableTimeArray"
+ NillableDocumentArray nType = "NillableDocumentArray"
+
+ BoolNillableArray nType = "BoolNillableArray"
+ IntNillableArray nType = "IntNillableArray"
+ FloatNillableArray nType = "FloatNillableArray"
+ StringNillableArray nType = "StringNillableArray"
+ BytesNillableArray nType = "BytesNillableArray"
+ TimeNillableArray nType = "TimeNillableArray"
+ DocumentNillableArray nType = "DocumentNillableArray"
+
+ NillableBoolNillableArray nType = "NillableBoolNillableArray"
+ NillableIntNillableArray nType = "NillableIntNillableArray"
+ NillableFloatNillableArray nType = "NillableFloatNillableArray"
+ NillableStringNillableArray nType = "NillableStringNillableArray"
+ NillableBytesNillableArray nType = "NillableBytesNillableArray"
+ NillableTimeNillableArray nType = "NillableTimeNillableArray"
+ NillableDocumentNillableArray nType = "NillableDocumentNillableArray"
+)
+
+// extractValue takes an input of type `any` and checks if it is an `Option[T]`.
+// If it is and contains a value, it returns the contained value.
+// Otherwise, it returns the input itself.
+func extractValue(input any) any {
+ inputVal := reflect.ValueOf(input)
+
+ // Check if the type is Option[T] by seeing if it has the HasValue and Value methods.
+ hasValueMethod := inputVal.MethodByName("HasValue")
+ valueMethod := inputVal.MethodByName("Value")
+
+ if hasValueMethod.IsValid() && valueMethod.IsValid() {
+ // Call HasValue to check if there's a value.
+ hasValueResult := hasValueMethod.Call(nil)
+ if len(hasValueResult) == 1 {
+ if hasValueResult[0].Bool() {
+ // Call Value to get the actual value if HasValue is true.
+ valueResult := valueMethod.Call(nil)
+ if len(valueResult) == 1 {
+ return valueResult[0].Interface()
+ }
+ } else {
+ // Return nil if HasValue is false.
+ return nil
+ }
+ }
+ }
+
+ // Return the input itself if it's not an Option[T] with a value.
+ return input
+}
+
+func TestNormalValue_NewValueAndTypeAssertion(t *testing.T) {
+ typeAssertMap := map[nType]func(NormalValue) (any, bool){
+ BoolType: func(v NormalValue) (any, bool) { return v.Bool() },
+ IntType: func(v NormalValue) (any, bool) { return v.Int() },
+ FloatType: func(v NormalValue) (any, bool) { return v.Float() },
+ StringType: func(v NormalValue) (any, bool) { return v.String() },
+ BytesType: func(v NormalValue) (any, bool) { return v.Bytes() },
+ TimeType: func(v NormalValue) (any, bool) { return v.Time() },
+ DocumentType: func(v NormalValue) (any, bool) { return v.Document() },
+
+ NillableBoolType: func(v NormalValue) (any, bool) { return v.NillableBool() },
+ NillableIntType: func(v NormalValue) (any, bool) { return v.NillableInt() },
+ NillableFloatType: func(v NormalValue) (any, bool) { return v.NillableFloat() },
+ NillableStringType: func(v NormalValue) (any, bool) { return v.NillableString() },
+ NillableBytesType: func(v NormalValue) (any, bool) { return v.NillableBytes() },
+ NillableTimeType: func(v NormalValue) (any, bool) { return v.NillableTime() },
+ NillableDocumentType: func(v NormalValue) (any, bool) { return v.NillableDocument() },
+
+ BoolArray: func(v NormalValue) (any, bool) { return v.BoolArray() },
+ IntArray: func(v NormalValue) (any, bool) { return v.IntArray() },
+ FloatArray: func(v NormalValue) (any, bool) { return v.FloatArray() },
+ StringArray: func(v NormalValue) (any, bool) { return v.StringArray() },
+ BytesArray: func(v NormalValue) (any, bool) { return v.BytesArray() },
+ TimeArray: func(v NormalValue) (any, bool) { return v.TimeArray() },
+ DocumentArray: func(v NormalValue) (any, bool) { return v.DocumentArray() },
+
+ BoolNillableArray: func(v NormalValue) (any, bool) { return v.BoolNillableArray() },
+ IntNillableArray: func(v NormalValue) (any, bool) { return v.IntNillableArray() },
+ FloatNillableArray: func(v NormalValue) (any, bool) { return v.FloatNillableArray() },
+ StringNillableArray: func(v NormalValue) (any, bool) { return v.StringNillableArray() },
+ BytesNillableArray: func(v NormalValue) (any, bool) { return v.BytesNillableArray() },
+ TimeNillableArray: func(v NormalValue) (any, bool) { return v.TimeNillableArray() },
+ DocumentNillableArray: func(v NormalValue) (any, bool) { return v.DocumentNillableArray() },
+
+ NillableBoolArray: func(v NormalValue) (any, bool) { return v.NillableBoolArray() },
+ NillableIntArray: func(v NormalValue) (any, bool) { return v.NillableIntArray() },
+ NillableFloatArray: func(v NormalValue) (any, bool) { return v.NillableFloatArray() },
+ NillableStringArray: func(v NormalValue) (any, bool) { return v.NillableStringArray() },
+ NillableBytesArray: func(v NormalValue) (any, bool) { return v.NillableBytesArray() },
+ NillableTimeArray: func(v NormalValue) (any, bool) { return v.NillableTimeArray() },
+ NillableDocumentArray: func(v NormalValue) (any, bool) { return v.NillableDocumentArray() },
+
+ NillableBoolNillableArray: func(v NormalValue) (any, bool) { return v.NillableBoolNillableArray() },
+ NillableIntNillableArray: func(v NormalValue) (any, bool) { return v.NillableIntNillableArray() },
+ NillableFloatNillableArray: func(v NormalValue) (any, bool) { return v.NillableFloatNillableArray() },
+ NillableStringNillableArray: func(v NormalValue) (any, bool) { return v.NillableStringNillableArray() },
+ NillableBytesNillableArray: func(v NormalValue) (any, bool) { return v.NillableBytesNillableArray() },
+ NillableTimeNillableArray: func(v NormalValue) (any, bool) { return v.NillableTimeNillableArray() },
+ NillableDocumentNillableArray: func(v NormalValue) (any, bool) {
+ return v.NillableDocumentNillableArray()
+ },
+ }
+
+ newMap := map[nType]func(any) NormalValue{
+ BoolType: func(v any) NormalValue { return NewNormalBool(v.(bool)) },
+ IntType: func(v any) NormalValue { return NewNormalInt(v.(int64)) },
+ FloatType: func(v any) NormalValue { return NewNormalFloat(v.(float64)) },
+ StringType: func(v any) NormalValue { return NewNormalString(v.(string)) },
+ BytesType: func(v any) NormalValue { return NewNormalBytes(v.([]byte)) },
+ TimeType: func(v any) NormalValue { return NewNormalTime(v.(time.Time)) },
+ DocumentType: func(v any) NormalValue { return NewNormalDocument(v.(*Document)) },
+
+ NillableBoolType: func(v any) NormalValue { return NewNormalNillableBool(v.(immutable.Option[bool])) },
+ NillableIntType: func(v any) NormalValue { return NewNormalNillableInt(v.(immutable.Option[int64])) },
+ NillableFloatType: func(v any) NormalValue { return NewNormalNillableFloat(v.(immutable.Option[float64])) },
+ NillableStringType: func(v any) NormalValue { return NewNormalNillableString(v.(immutable.Option[string])) },
+ NillableBytesType: func(v any) NormalValue { return NewNormalNillableBytes(v.(immutable.Option[[]byte])) },
+ NillableTimeType: func(v any) NormalValue { return NewNormalNillableTime(v.(immutable.Option[time.Time])) },
+ NillableDocumentType: func(v any) NormalValue { return NewNormalNillableDocument(v.(immutable.Option[*Document])) },
+
+ BoolArray: func(v any) NormalValue { return NewNormalBoolArray(v.([]bool)) },
+ IntArray: func(v any) NormalValue { return NewNormalIntArray(v.([]int64)) },
+ FloatArray: func(v any) NormalValue { return NewNormalFloatArray(v.([]float64)) },
+ StringArray: func(v any) NormalValue { return NewNormalStringArray(v.([]string)) },
+ BytesArray: func(v any) NormalValue { return NewNormalBytesArray(v.([][]byte)) },
+ TimeArray: func(v any) NormalValue { return NewNormalTimeArray(v.([]time.Time)) },
+ DocumentArray: func(v any) NormalValue { return NewNormalDocumentArray(v.([]*Document)) },
+
+ NillableBoolArray: func(v any) NormalValue {
+ return NewNormalNillableBoolArray(v.([]immutable.Option[bool]))
+ },
+ NillableIntArray: func(v any) NormalValue {
+ return NewNormalNillableIntArray(v.([]immutable.Option[int64]))
+ },
+ NillableFloatArray: func(v any) NormalValue {
+ return NewNormalNillableFloatArray(v.([]immutable.Option[float64]))
+ },
+ NillableStringArray: func(v any) NormalValue {
+ return NewNormalNillableStringArray(v.([]immutable.Option[string]))
+ },
+ NillableBytesArray: func(v any) NormalValue {
+ return NewNormalNillableBytesArray(v.([]immutable.Option[[]byte]))
+ },
+ NillableTimeArray: func(v any) NormalValue {
+ return NewNormalNillableTimeArray(v.([]immutable.Option[time.Time]))
+ },
+ NillableDocumentArray: func(v any) NormalValue {
+ return NewNormalNillableDocumentArray(v.([]immutable.Option[*Document]))
+ },
+
+ BoolNillableArray: func(v any) NormalValue {
+ return NewNormalBoolNillableArray(v.(immutable.Option[[]bool]))
+ },
+ IntNillableArray: func(v any) NormalValue {
+ return NewNormalIntNillableArray(v.(immutable.Option[[]int64]))
+ },
+ FloatNillableArray: func(v any) NormalValue {
+ return NewNormalFloatNillableArray(v.(immutable.Option[[]float64]))
+ },
+ StringNillableArray: func(v any) NormalValue {
+ return NewNormalStringNillableArray(v.(immutable.Option[[]string]))
+ },
+ BytesNillableArray: func(v any) NormalValue {
+ return NewNormalBytesNillableArray(v.(immutable.Option[[][]byte]))
+ },
+ TimeNillableArray: func(v any) NormalValue {
+ return NewNormalTimeNillableArray(v.(immutable.Option[[]time.Time]))
+ },
+ DocumentNillableArray: func(v any) NormalValue {
+ return NewNormalDocumentNillableArray(v.(immutable.Option[[]*Document]))
+ },
+
+ NillableBoolNillableArray: func(v any) NormalValue {
+ return NewNormalNillableBoolNillableArray(v.(immutable.Option[[]immutable.Option[bool]]))
+ },
+ NillableIntNillableArray: func(v any) NormalValue {
+ return NewNormalNillableIntNillableArray(v.(immutable.Option[[]immutable.Option[int64]]))
+ },
+ NillableFloatNillableArray: func(v any) NormalValue {
+ return NewNormalNillableFloatNillableArray(v.(immutable.Option[[]immutable.Option[float64]]))
+ },
+ NillableStringNillableArray: func(v any) NormalValue {
+ return NewNormalNillableStringNillableArray(v.(immutable.Option[[]immutable.Option[string]]))
+ },
+ NillableBytesNillableArray: func(v any) NormalValue {
+ return NewNormalNillableBytesNillableArray(v.(immutable.Option[[]immutable.Option[[]byte]]))
+ },
+ NillableTimeNillableArray: func(v any) NormalValue {
+ return NewNormalNillableTimeNillableArray(v.(immutable.Option[[]immutable.Option[time.Time]]))
+ },
+ NillableDocumentNillableArray: func(v any) NormalValue {
+ return NewNormalNillableDocumentNillableArray(v.(immutable.Option[[]immutable.Option[*Document]]))
+ },
+ }
+
+ tests := []struct {
+ nType nType
+ input any
+ isNillable bool
+ isNil bool
+ isArray bool
+ }{
+ {
+ nType: BoolType,
+ input: true,
+ },
+ {
+ nType: IntType,
+ input: int64(1),
+ },
+ {
+ nType: FloatType,
+ input: float64(1),
+ },
+ {
+ nType: StringType,
+ input: "test",
+ },
+ {
+ nType: BytesType,
+ input: []byte{1, 2, 3},
+ },
+ {
+ nType: TimeType,
+ input: time.Now(),
+ },
+ {
+ nType: DocumentType,
+ input: &Document{},
+ },
+ {
+ nType: NillableBoolType,
+ input: immutable.Some(true),
+ isNillable: true,
+ },
+ {
+ nType: NillableBoolType,
+ input: immutable.None[bool](),
+ isNil: true,
+ isNillable: true,
+ },
+ {
+ nType: NillableIntType,
+ input: immutable.Some(int64(1)),
+ isNillable: true,
+ },
+ {
+ nType: NillableIntType,
+ input: immutable.None[int64](),
+ isNil: true,
+ isNillable: true,
+ },
+ {
+ nType: NillableFloatType,
+ input: immutable.Some(float64(1)),
+ isNillable: true,
+ },
+ {
+ nType: NillableFloatType,
+ input: immutable.None[float64](),
+ isNil: true,
+ isNillable: true,
+ },
+ {
+ nType: NillableStringType,
+ input: immutable.Some("test"),
+ isNillable: true,
+ },
+ {
+ nType: NillableStringType,
+ input: immutable.None[string](),
+ isNil: true,
+ isNillable: true,
+ },
+ {
+ nType: NillableBytesType,
+ input: immutable.Some([]byte{1, 2, 3}),
+ isNillable: true,
+ },
+ {
+ nType: NillableBytesType,
+ input: immutable.None[[]byte](),
+ isNil: true,
+ isNillable: true,
+ },
+ {
+ nType: NillableTimeType,
+ input: immutable.Some(time.Now()),
+ isNillable: true,
+ },
+ {
+ nType: NillableTimeType,
+ input: immutable.None[time.Time](),
+ isNil: true,
+ isNillable: true,
+ },
+ {
+ nType: NillableDocumentType,
+ input: immutable.Some(&Document{}),
+ isNillable: true,
+ },
+ {
+ nType: NillableDocumentType,
+ input: immutable.None[*Document](),
+ isNil: true,
+ isNillable: true,
+ },
+ {
+ nType: BoolArray,
+ input: []bool{true, false},
+ isArray: true,
+ },
+ {
+ nType: IntArray,
+ input: []int64{1, 2, 3},
+ isArray: true,
+ },
+ {
+ nType: FloatArray,
+ input: []float64{1, 2, 3},
+ isArray: true,
+ },
+ {
+ nType: StringArray,
+ input: []string{"test", "test2"},
+ isArray: true,
+ },
+ {
+ nType: BytesArray,
+ input: [][]byte{{1, 2, 3}, {4, 5, 6}},
+ isArray: true,
+ },
+ {
+ nType: TimeArray,
+ input: []time.Time{time.Now(), time.Now()},
+ isArray: true,
+ },
+ {
+ nType: DocumentArray,
+ input: []*Document{{}, {}},
+ isArray: true,
+ },
+ {
+ nType: NillableBoolArray,
+ input: []immutable.Option[bool]{immutable.Some(true)},
+ isArray: true,
+ },
+ {
+ nType: NillableIntArray,
+ input: []immutable.Option[int64]{immutable.Some(int64(1))},
+ isArray: true,
+ },
+ {
+ nType: NillableFloatArray,
+ input: []immutable.Option[float64]{immutable.Some(float64(1))},
+ isArray: true,
+ },
+ {
+ nType: NillableStringArray,
+ input: []immutable.Option[string]{immutable.Some("test")},
+ isArray: true,
+ },
+ {
+ nType: NillableBytesArray,
+ input: []immutable.Option[[]byte]{immutable.Some([]byte{1, 2, 3})},
+ isArray: true,
+ },
+ {
+ nType: NillableTimeArray,
+ input: []immutable.Option[time.Time]{immutable.Some(time.Now())},
+ isArray: true,
+ },
+ {
+ nType: NillableDocumentArray,
+ input: []immutable.Option[*Document]{immutable.Some(&Document{})},
+ isArray: true,
+ },
+ {
+ nType: BoolNillableArray,
+ input: immutable.Some([]bool{true, false}),
+ isNillable: true,
+ isArray: true,
+ },
+ {
+ nType: BoolNillableArray,
+ input: immutable.None[[]bool](),
+ isNillable: true,
+ isNil: true,
+ isArray: true,
+ },
+ {
+ nType: IntNillableArray,
+ input: immutable.Some([]int64{1, 2, 3}),
+ isNillable: true,
+ isArray: true,
+ },
+ {
+ nType: IntNillableArray,
+ input: immutable.None[[]int64](),
+ isNillable: true,
+ isNil: true,
+ isArray: true,
+ },
+ {
+ nType: FloatNillableArray,
+ input: immutable.Some([]float64{1, 2, 3}),
+ isNillable: true,
+ isArray: true,
+ },
+ {
+ nType: FloatNillableArray,
+ input: immutable.None[[]float64](),
+ isNillable: true,
+ isNil: true,
+ isArray: true,
+ },
+ {
+ nType: StringNillableArray,
+ input: immutable.Some([]string{"test", "test2"}),
+ isNillable: true,
+ isArray: true,
+ },
+ {
+ nType: StringNillableArray,
+ input: immutable.None[[]string](),
+ isNillable: true,
+ isNil: true,
+ isArray: true,
+ },
+ {
+ nType: BytesNillableArray,
+ input: immutable.Some([][]byte{{1, 2, 3}, {4, 5, 6}}),
+ isNillable: true,
+ isArray: true,
+ },
+ {
+ nType: BytesNillableArray,
+ input: immutable.None[[][]byte](),
+ isNillable: true,
+ isNil: true,
+ isArray: true,
+ },
+ {
+ nType: TimeNillableArray,
+ input: immutable.Some([]time.Time{time.Now(), time.Now()}),
+ isNillable: true,
+ isArray: true,
+ },
+ {
+ nType: TimeNillableArray,
+ input: immutable.None[[]time.Time](),
+ isNillable: true,
+ isNil: true,
+ isArray: true,
+ },
+ {
+ nType: DocumentNillableArray,
+ input: immutable.Some([]*Document{{}, {}}),
+ isNillable: true,
+ isArray: true,
+ },
+ {
+ nType: DocumentNillableArray,
+ input: immutable.None[[]*Document](),
+ isNillable: true,
+ isNil: true,
+ isArray: true,
+ },
+ {
+ nType: NillableBoolNillableArray,
+ input: immutable.Some([]immutable.Option[bool]{immutable.Some(true)}),
+ isNillable: true,
+ isArray: true,
+ },
+ {
+ nType: NillableBoolNillableArray,
+ input: immutable.None[[]immutable.Option[bool]](),
+ isNillable: true,
+ isNil: true,
+ isArray: true,
+ },
+ {
+ nType: NillableIntNillableArray,
+ input: immutable.Some([]immutable.Option[int64]{immutable.Some(int64(1))}),
+ isNillable: true,
+ isArray: true,
+ },
+ {
+ nType: NillableIntNillableArray,
+ input: immutable.None[[]immutable.Option[int64]](),
+ isNillable: true,
+ isNil: true,
+ isArray: true,
+ },
+ {
+ nType: NillableFloatNillableArray,
+ input: immutable.Some([]immutable.Option[float64]{immutable.Some(float64(1))}),
+ isNillable: true,
+ isArray: true,
+ },
+ {
+ nType: NillableFloatNillableArray,
+ input: immutable.None[[]immutable.Option[float64]](),
+ isNillable: true,
+ isNil: true,
+ isArray: true,
+ },
+ {
+ nType: NillableStringNillableArray,
+ input: immutable.Some([]immutable.Option[string]{immutable.Some("test")}),
+ isNillable: true,
+ isArray: true,
+ },
+ {
+ nType: NillableStringNillableArray,
+ input: immutable.None[[]immutable.Option[string]](),
+ isNillable: true,
+ isNil: true,
+ isArray: true,
+ },
+ {
+ nType: NillableBytesNillableArray,
+ input: immutable.Some([]immutable.Option[[]byte]{immutable.Some([]byte{1, 2, 3})}),
+ isNillable: true,
+ isArray: true,
+ },
+ {
+ nType: NillableBytesNillableArray,
+ input: immutable.None[[]immutable.Option[[]byte]](),
+ isNillable: true,
+ isNil: true,
+ isArray: true,
+ },
+ {
+ nType: NillableTimeNillableArray,
+ input: immutable.Some([]immutable.Option[time.Time]{immutable.Some(time.Now())}),
+ isNillable: true,
+ isArray: true,
+ },
+ {
+ nType: NillableTimeNillableArray,
+ input: immutable.None[[]immutable.Option[time.Time]](),
+ isNillable: true,
+ isNil: true,
+ isArray: true,
+ },
+ {
+ nType: NillableDocumentNillableArray,
+ input: immutable.Some([]immutable.Option[*Document]{immutable.Some(&Document{})}),
+ isNillable: true,
+ isArray: true,
+ },
+ }
+
+ for _, tt := range tests {
+ tStr := string(tt.nType)
+ t.Run(tStr, func(t *testing.T) {
+ actual, err := NewNormalValue(tt.input)
+ require.NoError(t, err)
+
+ for nType, typeAssertFunc := range typeAssertMap {
+ val, ok := typeAssertFunc(actual)
+ if nType == tt.nType {
+ assert.True(t, ok, tStr+"() should return true")
+ assert.Equal(t, tt.input, val, tStr+"() returned unexpected value")
+ newVal := newMap[nType](val)
+ assert.Equal(t, actual, newVal, "New"+tStr+"() returned unexpected NormalValue")
+ assert.Equal(t, extractValue(tt.input), actual.Unwrap(),
+ "Unwrap() returned unexpected value for "+tStr)
+ } else {
+ assert.False(t, ok, string(nType)+"() should return false for "+tStr)
+ }
+ }
+
+ if tt.isNillable {
+ assert.True(t, actual.IsNillable(), "IsNillable() should return true for "+tStr)
+ } else {
+ assert.False(t, actual.IsNillable(), "IsNillable() should return false for "+tStr)
+ }
+
+ if tt.isNil {
+ assert.True(t, actual.IsNil(), "IsNil() should return true for "+tStr)
+ } else {
+ assert.False(t, actual.IsNil(), "IsNil() should return false for "+tStr)
+ }
+
+ if tt.isArray {
+ assert.True(t, actual.IsArray(), "IsArray() should return true for "+tStr)
+ } else {
+ assert.False(t, actual.IsArray(), "IsArray() should return false for "+tStr)
+ }
+ })
+ }
+}
+
+func TestNormalValue_InUnknownType_ReturnError(t *testing.T) {
+ _, err := NewNormalValue(struct{ name string }{})
+ require.ErrorContains(t, err, errCanNotNormalizeValue)
+}
+
+func TestNormalValue_NewNormalValueFromAnyArray(t *testing.T) {
+ now := time.Now()
+ doc1 := &Document{}
+ doc2 := &Document{}
+
+ tests := []struct {
+ name string
+ input []any
+ expected NormalValue
+ err string
+ }{
+ {
+ name: "nil input",
+ input: nil,
+ err: errCanNotNormalizeValue,
+ },
+ {
+ name: "unknown element type",
+ input: []any{struct{ name string }{}},
+ err: errCanNotNormalizeValue,
+ },
+ {
+ name: "mixed elements type",
+ input: []any{1, "test", true},
+ err: errCanNotNormalizeValue,
+ },
+ {
+ name: "bool elements",
+ input: []any{true, false},
+ expected: NewNormalBoolArray([]bool{true, false}),
+ },
+ {
+ name: "int elements",
+ input: []any{int64(1), int64(2)},
+ expected: NewNormalIntArray([]int64{1, 2}),
+ },
+ {
+ name: "float elements",
+ input: []any{float64(1), float64(2)},
+ expected: NewNormalFloatArray([]float64{1, 2}),
+ },
+ {
+ name: "string elements",
+ input: []any{"test", "test2"},
+ expected: NewNormalStringArray([]string{"test", "test2"}),
+ },
+ {
+ name: "bytes elements",
+ input: []any{[]byte{1, 2, 3}, []byte{4, 5, 6}},
+ expected: NewNormalBytesArray([][]byte{{1, 2, 3}, {4, 5, 6}}),
+ },
+ {
+ name: "time elements",
+ input: []any{now, now},
+ expected: NewNormalTimeArray([]time.Time{now, now}),
+ },
+ {
+ name: "document elements",
+ input: []any{doc1, doc2},
+ expected: NewNormalDocumentArray([]*Document{doc1, doc2}),
+ },
+ {
+ name: "bool and nil elements",
+ input: []any{true, nil, false},
+ expected: NewNormalNillableBoolArray(
+ []immutable.Option[bool]{immutable.Some(true), immutable.None[bool](), immutable.Some(false)},
+ ),
+ },
+ {
+ name: "int and nil elements",
+ input: []any{1, nil, 2},
+ expected: NewNormalNillableIntArray(
+ []immutable.Option[int64]{immutable.Some(int64(1)), immutable.None[int64](), immutable.Some(int64(2))},
+ ),
+ },
+ {
+ name: "float and nil elements",
+ input: []any{1.0, nil, 2.0},
+ expected: NewNormalNillableFloatArray(
+ []immutable.Option[float64]{immutable.Some(1.0), immutable.None[float64](), immutable.Some(2.0)},
+ ),
+ },
+ {
+ name: "string and nil elements",
+ input: []any{"test", nil, "test2"},
+ expected: NewNormalNillableStringArray(
+ []immutable.Option[string]{immutable.Some("test"), immutable.None[string](), immutable.Some("test2")},
+ ),
+ },
+ {
+ name: "bytes and nil elements",
+ input: []any{[]byte{1, 2, 3}, nil, []byte{4, 5, 6}},
+ expected: NewNormalNillableBytesArray(
+ []immutable.Option[[]byte]{
+ immutable.Some([]byte{1, 2, 3}),
+ immutable.None[[]byte](),
+ immutable.Some([]byte{4, 5, 6}),
+ },
+ ),
+ },
+ {
+ name: "time and nil elements",
+ input: []any{now, nil, now},
+ expected: NewNormalNillableTimeArray(
+ []immutable.Option[time.Time]{immutable.Some(now), immutable.None[time.Time](), immutable.Some(now)},
+ ),
+ },
+ {
+ name: "document and nil elements",
+ input: []any{doc1, nil, doc2},
+ expected: NewNormalNillableDocumentArray(
+ []immutable.Option[*Document]{immutable.Some(doc1), immutable.None[*Document](), immutable.Some(doc2)},
+ ),
+ },
+ {
+ name: "mixed int elements",
+ input: []any{int8(1), int16(2), int32(3), int64(4), int(5), uint8(6), uint16(7), uint32(8),
+ uint64(9), uint(10)},
+ expected: NewNormalIntArray([]int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}),
+ },
+ {
+ name: "mixed float elements",
+ input: []any{float32(1.5), float64(2.2)},
+ expected: NewNormalFloatArray([]float64{1.5, 2.2}),
+ },
+ {
+ name: "mixed number elements",
+ input: []any{int8(1), int16(2), int32(3), int64(4), int(5), uint8(6), uint16(7), uint32(8),
+ uint64(9), uint(10), float32(1.5), float64(2.2)},
+ expected: NewNormalFloatArray([]float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1.5, 2.2}),
+ },
+ {
+ name: "mixed int and nil elements",
+ input: []any{int8(1), nil, int16(2), int32(3), int64(4), int(5), uint8(6), uint16(7), uint32(8),
+ uint64(9), nil, uint(10)},
+ expected: NewNormalNillableIntArray(
+ []immutable.Option[int64]{immutable.Some(int64(1)), immutable.None[int64](), immutable.Some(int64(2)),
+ immutable.Some(int64(3)), immutable.Some(int64(4)), immutable.Some(int64(5)), immutable.Some(int64(6)),
+ immutable.Some(int64(7)), immutable.Some(int64(8)), immutable.Some(int64(9)), immutable.None[int64](),
+ immutable.Some(int64(10))},
+ ),
+ },
+ {
+ name: "mixed float and nil elements",
+ input: []any{float32(1.5), nil, float64(2.2)},
+ expected: NewNormalNillableFloatArray(
+ []immutable.Option[float64]{immutable.Some(1.5), immutable.None[float64](), immutable.Some(2.2)},
+ ),
+ },
+ {
+ name: "mixed number and nil elements",
+ input: []any{int8(1), nil, int16(2), int32(3), int64(4), int(5), uint8(6), uint16(7), uint32(8),
+ uint64(9), nil, uint(10), float32(1.5), nil, float64(2.2)},
+ expected: NewNormalNillableFloatArray(
+ []immutable.Option[float64]{
+ immutable.Some(1.0), immutable.None[float64](), immutable.Some(2.0), immutable.Some(3.0),
+ immutable.Some(4.0), immutable.Some(5.0), immutable.Some(6.0), immutable.Some(7.0),
+ immutable.Some(8.0), immutable.Some(9.0), immutable.None[float64](), immutable.Some(10.0),
+ immutable.Some(1.5), immutable.None[float64](), immutable.Some(2.2)},
+ ),
+ },
+ }
+
+ for _, tt := range tests {
+ tStr := tt.name
+ t.Run(tStr, func(t *testing.T) {
+ actual, err := NewNormalValue(tt.input)
+ if tt.err != "" {
+ require.ErrorContains(t, err, tt.err)
+ return
+ }
+
+ assert.Equal(t, tt.expected, actual)
+ })
+ }
+}
+
+func TestNormalValue_NewNormalInt(t *testing.T) {
+ i64 := int64(2)
+ v := NewNormalInt(i64)
+ getInt := func(v NormalValue) int64 { i, _ := v.Int(); return i }
+
+ assert.Equal(t, i64, getInt(v))
+
+ v = NewNormalInt(float32(2.5))
+ assert.Equal(t, i64, getInt(v))
+
+ v = NewNormalInt(float64(2.5))
+ assert.Equal(t, i64, getInt(v))
+
+ v = NewNormalInt(int8(2))
+ assert.Equal(t, i64, getInt(v))
+
+ v = NewNormalInt(int16(2))
+ assert.Equal(t, i64, getInt(v))
+
+ v = NewNormalInt(int32(2))
+ assert.Equal(t, i64, getInt(v))
+
+ v = NewNormalInt(int(2))
+ assert.Equal(t, i64, getInt(v))
+
+ v = NewNormalInt(uint8(2))
+ assert.Equal(t, i64, getInt(v))
+
+ v = NewNormalInt(uint16(2))
+ assert.Equal(t, i64, getInt(v))
+
+ v = NewNormalInt(uint32(2))
+ assert.Equal(t, i64, getInt(v))
+
+ v = NewNormalInt(uint64(2))
+ assert.Equal(t, i64, getInt(v))
+
+ v = NewNormalInt(uint(2))
+ assert.Equal(t, i64, getInt(v))
+}
+
+func TestNormalValue_NewNormalFloat(t *testing.T) {
+ f64Frac := float64(2.5)
+ f64 := float64(2)
+
+ getFloat := func(v NormalValue) float64 { f, _ := v.Float(); return f }
+
+ v := NewNormalFloat(f64Frac)
+ assert.Equal(t, f64Frac, getFloat(v))
+
+ v = NewNormalFloat(float32(2.5))
+ assert.Equal(t, f64Frac, getFloat(v))
+
+ v = NewNormalFloat(int8(2))
+ assert.Equal(t, f64, getFloat(v))
+
+ v = NewNormalFloat(int16(2))
+ assert.Equal(t, f64, getFloat(v))
+
+ v = NewNormalFloat(int32(2))
+ assert.Equal(t, f64, getFloat(v))
+
+ v = NewNormalFloat(int64(2))
+ assert.Equal(t, f64, getFloat(v))
+
+ v = NewNormalFloat(int(2))
+ assert.Equal(t, f64, getFloat(v))
+
+ v = NewNormalFloat(uint8(2))
+ assert.Equal(t, f64, getFloat(v))
+
+ v = NewNormalFloat(uint16(2))
+ assert.Equal(t, f64, getFloat(v))
+
+ v = NewNormalFloat(uint32(2))
+ assert.Equal(t, f64, getFloat(v))
+
+ v = NewNormalFloat(uint64(2))
+ assert.Equal(t, f64, getFloat(v))
+
+ v = NewNormalFloat(uint(2))
+ assert.Equal(t, f64, getFloat(v))
+}
+
+func TestNormalValue_NewNormalString(t *testing.T) {
+ strInput := "str"
+
+ getString := func(v NormalValue) string { s, _ := v.String(); return s }
+
+ v := NewNormalString(strInput)
+ assert.Equal(t, strInput, getString(v))
+
+ v = NewNormalString([]byte{'s', 't', 'r'})
+ assert.Equal(t, strInput, getString(v))
+}
+
+func TestNormalValue_NewNormalBytes(t *testing.T) {
+ bytesInput := []byte("str")
+
+ getBytes := func(v NormalValue) []byte { b, _ := v.Bytes(); return b }
+
+ v := NewNormalBytes(bytesInput)
+ assert.Equal(t, bytesInput, getBytes(v))
+
+ v = NewNormalBytes("str")
+ assert.Equal(t, bytesInput, getBytes(v))
+}
+
+func TestNormalValue_NewNormalIntArray(t *testing.T) {
+ i64Input := []int64{2}
+
+ getIntArray := func(v NormalValue) []int64 { i, _ := v.IntArray(); return i }
+
+ v := NewNormalIntArray(i64Input)
+ assert.Equal(t, i64Input, getIntArray(v))
+
+ v = NewNormalIntArray([]float32{2.5})
+ assert.Equal(t, i64Input, getIntArray(v))
+
+ v = NewNormalIntArray([]int8{2})
+ assert.Equal(t, i64Input, getIntArray(v))
+
+ v = NewNormalIntArray([]int16{2})
+ assert.Equal(t, i64Input, getIntArray(v))
+
+ v = NewNormalIntArray([]int32{2})
+ assert.Equal(t, i64Input, getIntArray(v))
+
+ v = NewNormalIntArray([]int64{2})
+ assert.Equal(t, i64Input, getIntArray(v))
+
+ v = NewNormalIntArray([]int{2})
+ assert.Equal(t, i64Input, getIntArray(v))
+
+ v = NewNormalIntArray([]uint8{2})
+ assert.Equal(t, i64Input, getIntArray(v))
+
+ v = NewNormalIntArray([]uint16{2})
+ assert.Equal(t, i64Input, getIntArray(v))
+
+ v = NewNormalIntArray([]uint32{2})
+ assert.Equal(t, i64Input, getIntArray(v))
+
+ v = NewNormalIntArray([]uint64{2})
+ assert.Equal(t, i64Input, getIntArray(v))
+
+ v = NewNormalIntArray([]uint{2})
+ assert.Equal(t, i64Input, getIntArray(v))
+}
+
+func TestNormalValue_NewNormalFloatArray(t *testing.T) {
+ f64InputFrac := []float64{2.5}
+ f64Input := []float64{2.0}
+
+ getFloatArray := func(v NormalValue) []float64 { f, _ := v.FloatArray(); return f }
+
+ v := NewNormalFloatArray(f64InputFrac)
+ assert.Equal(t, f64InputFrac, getFloatArray(v))
+
+ v = NewNormalFloatArray([]float32{2.5})
+ assert.Equal(t, f64InputFrac, getFloatArray(v))
+
+ v = NewNormalFloatArray([]int8{2})
+ assert.Equal(t, f64Input, getFloatArray(v))
+
+ v = NewNormalFloatArray([]int16{2})
+ assert.Equal(t, f64Input, getFloatArray(v))
+
+ v = NewNormalFloatArray([]int32{2})
+ assert.Equal(t, f64Input, getFloatArray(v))
+
+ v = NewNormalFloatArray([]int64{2})
+ assert.Equal(t, f64Input, getFloatArray(v))
+
+ v = NewNormalFloatArray([]int{2})
+ assert.Equal(t, f64Input, getFloatArray(v))
+
+ v = NewNormalFloatArray([]uint8{2})
+ assert.Equal(t, f64Input, getFloatArray(v))
+
+ v = NewNormalFloatArray([]uint16{2})
+ assert.Equal(t, f64Input, getFloatArray(v))
+
+ v = NewNormalFloatArray([]uint32{2})
+ assert.Equal(t, f64Input, getFloatArray(v))
+
+ v = NewNormalFloatArray([]uint64{2})
+ assert.Equal(t, f64Input, getFloatArray(v))
+
+ v = NewNormalFloatArray([]uint{2})
+ assert.Equal(t, f64Input, getFloatArray(v))
+}
+
+func TestNormalValue_NewNormalStringArray(t *testing.T) {
+ strInput := []string{"str"}
+
+ getStringArray := func(v NormalValue) []string { s, _ := v.StringArray(); return s }
+
+ v := NewNormalStringArray(strInput)
+ assert.Equal(t, strInput, getStringArray(v))
+
+ v = NewNormalStringArray([][]byte{{'s', 't', 'r'}})
+ assert.Equal(t, strInput, getStringArray(v))
+}
+
+func TestNormalValue_NewNormalBytesArray(t *testing.T) {
+ bytesInput := [][]byte{[]byte("str")}
+
+ getBytesArray := func(v NormalValue) [][]byte { b, _ := v.BytesArray(); return b }
+
+ v := NewNormalBytesArray(bytesInput)
+ assert.Equal(t, bytesInput, getBytesArray(v))
+
+ v = NewNormalBytesArray([]string{"str"})
+ assert.Equal(t, bytesInput, getBytesArray(v))
+}
+
+func TestNormalValue_NewNormalNillableFloatArray(t *testing.T) {
+ f64InputFrac := []immutable.Option[float64]{immutable.Some(2.5)}
+ f64Input := []immutable.Option[float64]{immutable.Some(2.0)}
+
+ getNillableFloatArray := func(v NormalValue) []immutable.Option[float64] { f, _ := v.NillableFloatArray(); return f }
+
+ v := NewNormalNillableFloatArray(f64InputFrac)
+ assert.Equal(t, f64InputFrac, getNillableFloatArray(v))
+
+ v = NewNormalNillableFloatArray([]immutable.Option[float32]{immutable.Some[float32](2.5)})
+ assert.Equal(t, f64InputFrac, getNillableFloatArray(v))
+
+ v = NewNormalNillableFloatArray([]immutable.Option[int8]{immutable.Some[int8](2)})
+ assert.Equal(t, f64Input, getNillableFloatArray(v))
+
+ v = NewNormalNillableFloatArray([]immutable.Option[int16]{immutable.Some[int16](2)})
+ assert.Equal(t, f64Input, getNillableFloatArray(v))
+
+ v = NewNormalNillableFloatArray([]immutable.Option[int32]{immutable.Some[int32](2)})
+ assert.Equal(t, f64Input, getNillableFloatArray(v))
+
+ v = NewNormalNillableFloatArray([]immutable.Option[int64]{immutable.Some[int64](2)})
+ assert.Equal(t, f64Input, getNillableFloatArray(v))
+
+ v = NewNormalNillableFloatArray([]immutable.Option[int]{immutable.Some[int](2)})
+ assert.Equal(t, f64Input, getNillableFloatArray(v))
+
+ v = NewNormalNillableFloatArray([]immutable.Option[uint8]{immutable.Some[uint8](2)})
+ assert.Equal(t, f64Input, getNillableFloatArray(v))
+
+ v = NewNormalNillableFloatArray([]immutable.Option[uint16]{immutable.Some[uint16](2)})
+ assert.Equal(t, f64Input, getNillableFloatArray(v))
+
+ v = NewNormalNillableFloatArray([]immutable.Option[uint32]{immutable.Some[uint32](2)})
+ assert.Equal(t, f64Input, getNillableFloatArray(v))
+
+ v = NewNormalNillableFloatArray([]immutable.Option[uint64]{immutable.Some[uint64](2)})
+ assert.Equal(t, f64Input, getNillableFloatArray(v))
+
+ v = NewNormalNillableFloatArray([]immutable.Option[uint]{immutable.Some[uint](2)})
+ assert.Equal(t, f64Input, getNillableFloatArray(v))
+}
+
+func TestNormalValue_NewNormalNillableIntArray(t *testing.T) {
+ i64Input := []immutable.Option[int64]{immutable.Some[int64](2)}
+
+ getNillableIntArray := func(v NormalValue) []immutable.Option[int64] { i, _ := v.NillableIntArray(); return i }
+
+ v := NewNormalNillableIntArray(i64Input)
+ assert.Equal(t, i64Input, getNillableIntArray(v))
+
+ v = NewNormalNillableIntArray([]immutable.Option[float32]{immutable.Some[float32](2.5)})
+ assert.Equal(t, i64Input, getNillableIntArray(v))
+
+ v = NewNormalNillableIntArray([]immutable.Option[float64]{immutable.Some[float64](2.5)})
+ assert.Equal(t, i64Input, getNillableIntArray(v))
+
+ v = NewNormalNillableIntArray([]immutable.Option[int8]{immutable.Some[int8](2)})
+ assert.Equal(t, i64Input, getNillableIntArray(v))
+
+ v = NewNormalNillableIntArray([]immutable.Option[int16]{immutable.Some[int16](2)})
+ assert.Equal(t, i64Input, getNillableIntArray(v))
+
+ v = NewNormalNillableIntArray([]immutable.Option[int32]{immutable.Some[int32](2)})
+ assert.Equal(t, i64Input, getNillableIntArray(v))
+
+ v = NewNormalNillableIntArray([]immutable.Option[int]{immutable.Some[int](2)})
+ assert.Equal(t, i64Input, getNillableIntArray(v))
+
+ v = NewNormalNillableIntArray([]immutable.Option[uint8]{immutable.Some[uint8](2)})
+ assert.Equal(t, i64Input, getNillableIntArray(v))
+
+ v = NewNormalNillableIntArray([]immutable.Option[uint16]{immutable.Some[uint16](2)})
+ assert.Equal(t, i64Input, getNillableIntArray(v))
+
+ v = NewNormalNillableIntArray([]immutable.Option[uint32]{immutable.Some[uint32](2)})
+ assert.Equal(t, i64Input, getNillableIntArray(v))
+
+ v = NewNormalNillableIntArray([]immutable.Option[uint64]{immutable.Some[uint64](2)})
+ assert.Equal(t, i64Input, getNillableIntArray(v))
+
+ v = NewNormalNillableIntArray([]immutable.Option[uint]{immutable.Some[uint](2)})
+ assert.Equal(t, i64Input, getNillableIntArray(v))
+}
+
+func TestNormalValue_NewNormalNillableStringArray(t *testing.T) {
+ strInput := []immutable.Option[string]{immutable.Some("str")}
+
+ getNillableStringArray := func(v NormalValue) []immutable.Option[string] { s, _ := v.NillableStringArray(); return s }
+
+ v := NewNormalNillableStringArray(strInput)
+ assert.Equal(t, strInput, getNillableStringArray(v))
+
+ v = NewNormalNillableStringArray([]immutable.Option[[]byte]{immutable.Some[[]byte]([]byte{'s', 't', 'r'})})
+ assert.Equal(t, strInput, getNillableStringArray(v))
+}
+
+func TestNormalValue_NewNormalNillableBytesArray(t *testing.T) {
+ bytesInput := []immutable.Option[[]byte]{immutable.Some[[]byte]([]byte("str"))}
+
+ getNillableBytesArray := func(v NormalValue) []immutable.Option[[]byte] { b, _ := v.NillableBytesArray(); return b }
+
+ v := NewNormalNillableBytesArray(bytesInput)
+ assert.Equal(t, bytesInput, getNillableBytesArray(v))
+
+ v = NewNormalNillableBytesArray([]immutable.Option[string]{immutable.Some("str")})
+ assert.Equal(t, bytesInput, getNillableBytesArray(v))
+}
+
+func TestNormalValue_NewNormalIntArrayNillable(t *testing.T) {
+ i64Input := immutable.Some([]int64{2})
+
+ getIntNillableArray := func(v NormalValue) immutable.Option[[]int64] { i, _ := v.IntNillableArray(); return i }
+
+ v := NewNormalIntNillableArray(i64Input)
+ assert.Equal(t, i64Input, getIntNillableArray(v))
+
+ v = NewNormalIntNillableArray(immutable.Some([]float32{2.5}))
+ assert.Equal(t, i64Input, getIntNillableArray(v))
+
+ v = NewNormalIntNillableArray(immutable.Some([]float64{2.5}))
+ assert.Equal(t, i64Input, getIntNillableArray(v))
+
+ v = NewNormalIntNillableArray(immutable.Some([]int8{2}))
+ assert.Equal(t, i64Input, getIntNillableArray(v))
+
+ v = NewNormalIntNillableArray(immutable.Some([]int16{2}))
+ assert.Equal(t, i64Input, getIntNillableArray(v))
+
+ v = NewNormalIntNillableArray(immutable.Some([]int32{2}))
+ assert.Equal(t, i64Input, getIntNillableArray(v))
+
+ v = NewNormalIntNillableArray(immutable.Some([]int{2}))
+ assert.Equal(t, i64Input, getIntNillableArray(v))
+
+ v = NewNormalIntNillableArray(immutable.Some([]uint8{2}))
+ assert.Equal(t, i64Input, getIntNillableArray(v))
+
+ v = NewNormalIntNillableArray(immutable.Some([]uint16{2}))
+ assert.Equal(t, i64Input, getIntNillableArray(v))
+
+ v = NewNormalIntNillableArray(immutable.Some([]uint32{2}))
+ assert.Equal(t, i64Input, getIntNillableArray(v))
+
+ v = NewNormalIntNillableArray(immutable.Some([]uint64{2}))
+ assert.Equal(t, i64Input, getIntNillableArray(v))
+
+ v = NewNormalIntNillableArray(immutable.Some([]uint{2}))
+ assert.Equal(t, i64Input, getIntNillableArray(v))
+}
+
+func TestNormalValue_NewNormalFloatNillableArray(t *testing.T) {
+ f64InputFrac := immutable.Some([]float64{2.5})
+ f64Input := immutable.Some([]float64{2.0})
+
+ getFloatNillableArray := func(v NormalValue) immutable.Option[[]float64] { f, _ := v.FloatNillableArray(); return f }
+
+ v := NewNormalFloatNillableArray(f64InputFrac)
+ assert.Equal(t, f64InputFrac, getFloatNillableArray(v))
+
+ v = NewNormalFloatNillableArray(immutable.Some([]float32{2.5}))
+ assert.Equal(t, f64InputFrac, getFloatNillableArray(v))
+
+ v = NewNormalFloatNillableArray(immutable.Some([]int8{2}))
+ assert.Equal(t, f64Input, getFloatNillableArray(v))
+
+ v = NewNormalFloatNillableArray(immutable.Some([]int16{2}))
+ assert.Equal(t, f64Input, getFloatNillableArray(v))
+
+ v = NewNormalFloatNillableArray(immutable.Some([]int32{2}))
+ assert.Equal(t, f64Input, getFloatNillableArray(v))
+
+ v = NewNormalFloatNillableArray(immutable.Some([]int64{2}))
+ assert.Equal(t, f64Input, getFloatNillableArray(v))
+
+ v = NewNormalFloatNillableArray(immutable.Some([]int{2}))
+ assert.Equal(t, f64Input, getFloatNillableArray(v))
+
+ v = NewNormalFloatNillableArray(immutable.Some([]uint8{2}))
+ assert.Equal(t, f64Input, getFloatNillableArray(v))
+
+ v = NewNormalFloatNillableArray(immutable.Some([]uint16{2}))
+ assert.Equal(t, f64Input, getFloatNillableArray(v))
+
+ v = NewNormalFloatNillableArray(immutable.Some([]uint32{2}))
+ assert.Equal(t, f64Input, getFloatNillableArray(v))
+
+ v = NewNormalFloatNillableArray(immutable.Some([]uint64{2}))
+ assert.Equal(t, f64Input, getFloatNillableArray(v))
+
+ v = NewNormalFloatNillableArray(immutable.Some([]uint{2}))
+ assert.Equal(t, f64Input, getFloatNillableArray(v))
+}
+
+func TestNormalValue_NewNormalStringNillableArray(t *testing.T) {
+ strInput := immutable.Some([]string{"str"})
+
+ getStringNillableArray := func(v NormalValue) immutable.Option[[]string] { s, _ := v.StringNillableArray(); return s }
+
+ v := NewNormalStringNillableArray(strInput)
+ assert.Equal(t, strInput, getStringNillableArray(v))
+
+ v = NewNormalStringNillableArray(immutable.Some([][]byte{{'s', 't', 'r'}}))
+ assert.Equal(t, strInput, getStringNillableArray(v))
+}
+
+func TestNormalValue_NewNormalBytesNillableArray(t *testing.T) {
+ bytesInput := immutable.Some([][]byte{{'s', 't', 'r'}})
+
+ getBytesNillableArray := func(v NormalValue) immutable.Option[[][]byte] { s, _ := v.BytesNillableArray(); return s }
+
+ v := NewNormalBytesNillableArray(immutable.Some([]string{"str"}))
+ assert.Equal(t, bytesInput, getBytesNillableArray(v))
+
+ v = NewNormalBytesNillableArray(bytesInput)
+ assert.Equal(t, bytesInput, getBytesNillableArray(v))
+}
+
+func TestNormalValue_NewNormalNillableIntNillableArray(t *testing.T) {
+ i64Input := immutable.Some([]immutable.Option[int64]{immutable.Some(int64(2))})
+
+ getNillableIntNillableArray := func(v NormalValue) immutable.Option[[]immutable.Option[int64]] {
+ i, _ := v.NillableIntNillableArray()
+ return i
+ }
+
+ v := NewNormalNillableIntNillableArray(i64Input)
+ assert.Equal(t, i64Input, getNillableIntNillableArray(v))
+
+ v = NewNormalNillableIntNillableArray(immutable.Some([]immutable.Option[float32]{immutable.Some(float32(2.5))}))
+ assert.Equal(t, i64Input, getNillableIntNillableArray(v))
+
+ v = NewNormalNillableIntNillableArray(immutable.Some([]immutable.Option[float64]{immutable.Some(2.5)}))
+ assert.Equal(t, i64Input, getNillableIntNillableArray(v))
+
+ v = NewNormalNillableIntNillableArray(immutable.Some([]immutable.Option[int8]{immutable.Some(int8(2))}))
+ assert.Equal(t, i64Input, getNillableIntNillableArray(v))
+
+ v = NewNormalNillableIntNillableArray(immutable.Some([]immutable.Option[int16]{immutable.Some(int16(2))}))
+ assert.Equal(t, i64Input, getNillableIntNillableArray(v))
+
+ v = NewNormalNillableIntNillableArray(immutable.Some([]immutable.Option[int32]{immutable.Some(int32(2))}))
+ assert.Equal(t, i64Input, getNillableIntNillableArray(v))
+
+ v = NewNormalNillableIntNillableArray(immutable.Some([]immutable.Option[int]{immutable.Some(int(2))}))
+ assert.Equal(t, i64Input, getNillableIntNillableArray(v))
+
+ v = NewNormalNillableIntNillableArray(immutable.Some([]immutable.Option[uint8]{immutable.Some(uint8(2))}))
+ assert.Equal(t, i64Input, getNillableIntNillableArray(v))
+
+ v = NewNormalNillableIntNillableArray(immutable.Some([]immutable.Option[uint16]{immutable.Some(uint16(2))}))
+ assert.Equal(t, i64Input, getNillableIntNillableArray(v))
+
+ v = NewNormalNillableIntNillableArray(immutable.Some([]immutable.Option[uint32]{immutable.Some(uint32(2))}))
+ assert.Equal(t, i64Input, getNillableIntNillableArray(v))
+
+ v = NewNormalNillableIntNillableArray(immutable.Some([]immutable.Option[uint64]{immutable.Some(uint64(2))}))
+ assert.Equal(t, i64Input, getNillableIntNillableArray(v))
+
+ v = NewNormalNillableIntNillableArray(immutable.Some([]immutable.Option[uint]{immutable.Some(uint(2))}))
+ assert.Equal(t, i64Input, getNillableIntNillableArray(v))
+}
+
+func TestNormalValue_NewNormalNillableFloatNillableArray(t *testing.T) {
+ f64InputFrac := immutable.Some([]immutable.Option[float64]{immutable.Some(2.5)})
+ f64Input := immutable.Some([]immutable.Option[float64]{immutable.Some(2.0)})
+
+ getNillableFloatNillableArray := func(v NormalValue) immutable.Option[[]immutable.Option[float64]] {
+ f, _ := v.NillableFloatNillableArray()
+ return f
+ }
+
+ v := NewNormalNillableFloatNillableArray(f64InputFrac)
+ assert.Equal(t, f64InputFrac, getNillableFloatNillableArray(v))
+
+ v = NewNormalNillableFloatNillableArray(immutable.Some([]immutable.Option[float32]{immutable.Some(float32(2.5))}))
+ assert.Equal(t, f64InputFrac, getNillableFloatNillableArray(v))
+
+ v = NewNormalNillableFloatNillableArray(immutable.Some([]immutable.Option[int8]{immutable.Some(int8(2))}))
+ assert.Equal(t, f64Input, getNillableFloatNillableArray(v))
+
+ v = NewNormalNillableFloatNillableArray(immutable.Some([]immutable.Option[int16]{immutable.Some(int16(2))}))
+ assert.Equal(t, f64Input, getNillableFloatNillableArray(v))
+
+ v = NewNormalNillableFloatNillableArray(immutable.Some([]immutable.Option[int32]{immutable.Some(int32(2))}))
+ assert.Equal(t, f64Input, getNillableFloatNillableArray(v))
+
+ v = NewNormalNillableFloatNillableArray(immutable.Some([]immutable.Option[int64]{immutable.Some(int64(2))}))
+ assert.Equal(t, f64Input, getNillableFloatNillableArray(v))
+
+ v = NewNormalNillableFloatNillableArray(immutable.Some([]immutable.Option[int]{immutable.Some(2)}))
+ assert.Equal(t, f64Input, getNillableFloatNillableArray(v))
+
+ v = NewNormalNillableFloatNillableArray(immutable.Some([]immutable.Option[uint8]{immutable.Some(uint8(2))}))
+ assert.Equal(t, f64Input, getNillableFloatNillableArray(v))
+
+ v = NewNormalNillableFloatNillableArray(immutable.Some([]immutable.Option[uint16]{immutable.Some(uint16(2))}))
+ assert.Equal(t, f64Input, getNillableFloatNillableArray(v))
+
+ v = NewNormalNillableFloatNillableArray(immutable.Some([]immutable.Option[uint32]{immutable.Some(uint32(2))}))
+ assert.Equal(t, f64Input, getNillableFloatNillableArray(v))
+
+ v = NewNormalNillableFloatNillableArray(immutable.Some([]immutable.Option[uint64]{immutable.Some(uint64(2))}))
+ assert.Equal(t, f64Input, getNillableFloatNillableArray(v))
+
+ v = NewNormalNillableFloatNillableArray(immutable.Some([]immutable.Option[uint]{immutable.Some(uint(2))}))
+ assert.Equal(t, f64Input, getNillableFloatNillableArray(v))
+}
+
+func TestNormalValue_NewNormalNillableStringNillableArray(t *testing.T) {
+ strInput := immutable.Some([]immutable.Option[string]{immutable.Some("str")})
+
+ getNillableStringNillableArray := func(v NormalValue) immutable.Option[[]immutable.Option[string]] {
+ s, _ := v.NillableStringNillableArray()
+ return s
+ }
+
+ v := NewNormalNillableStringNillableArray(strInput)
+ assert.Equal(t, strInput, getNillableStringNillableArray(v))
+
+ bytesInput := immutable.Some([]immutable.Option[[]byte]{immutable.Some([]byte{'s', 't', 'r'})})
+ v = NewNormalNillableStringNillableArray(bytesInput)
+ assert.Equal(t, strInput, getNillableStringNillableArray(v))
+}
+
+func TestNormalValue_NewNormalNillableBytesNillableArray(t *testing.T) {
+ bytesInput := immutable.Some([]immutable.Option[[]byte]{immutable.Some([]byte{'s', 't', 'r'})})
+
+ getNillableBytesNillableArray := func(v NormalValue) immutable.Option[[]immutable.Option[[]byte]] {
+ s, _ := v.NillableBytesNillableArray()
+ return s
+ }
+
+ v := NewNormalNillableBytesNillableArray(bytesInput)
+ assert.Equal(t, bytesInput, getNillableBytesNillableArray(v))
+
+ strInput := immutable.Some([]immutable.Option[string]{immutable.Some("str")})
+ v = NewNormalNillableBytesNillableArray(strInput)
+ assert.Equal(t, bytesInput, getNillableBytesNillableArray(v))
+}
+
+func TestNormalValue_NewNormalNil(t *testing.T) {
+ fieldKinds := []FieldKind{}
+ for _, kind := range FieldKindStringToEnumMapping {
+ fieldKinds = append(fieldKinds, kind)
+ }
+ fieldKinds = append(fieldKinds, ObjectKind("Object"))
+ fieldKinds = append(fieldKinds, ObjectArrayKind("ObjectArr"))
+
+ for _, kind := range fieldKinds {
+ if kind.IsNillable() {
+ v, err := NewNormalNil(kind)
+ require.NoError(t, err)
+
+ assert.True(t, v.IsNil())
+ } else {
+ _, err := NewNormalNil(kind)
+ require.Error(t, err)
+ }
+ }
+}
+
+func TestNormalValue_ToArrayOfNormalValues(t *testing.T) {
+ now := time.Now()
+ doc1 := &Document{}
+ doc2 := &Document{}
+
+ normalNil, err := NewNormalNil(FieldKind_NILLABLE_INT)
+ require.NoError(t, err)
+
+ tests := []struct {
+ name string
+ input NormalValue
+ expected []NormalValue
+ err string
+ }{
+ {
+ name: "nil",
+ input: normalNil,
+ },
+ {
+ name: "not array",
+ input: NewNormalInt(1),
+ err: errCanNotTurnNormalValueIntoArray,
+ },
+ {
+ name: "bool elements",
+ input: NewNormalBoolArray([]bool{true, false}),
+ expected: []NormalValue{NewNormalBool(true), NewNormalBool(false)},
+ },
+ {
+ name: "int elements",
+ input: NewNormalIntArray([]int64{1, 2}),
+ expected: []NormalValue{NewNormalInt(1), NewNormalInt(2)},
+ },
+ {
+ name: "float elements",
+ input: NewNormalFloatArray([]float64{1.0, 2.0}),
+ expected: []NormalValue{NewNormalFloat(1.0), NewNormalFloat(2.0)},
+ },
+ {
+ name: "string elements",
+ input: NewNormalStringArray([]string{"test", "test2"}),
+ expected: []NormalValue{NewNormalString("test"), NewNormalString("test2")},
+ },
+ {
+ name: "bytes elements",
+ input: NewNormalBytesArray([][]byte{{1, 2, 3}, {4, 5, 6}}),
+ expected: []NormalValue{NewNormalBytes([]byte{1, 2, 3}), NewNormalBytes([]byte{4, 5, 6})},
+ },
+ {
+ name: "time elements",
+ input: NewNormalTimeArray([]time.Time{now, now}),
+ expected: []NormalValue{NewNormalTime(now), NewNormalTime(now)},
+ },
+ {
+ name: "document elements",
+ input: NewNormalDocumentArray([]*Document{doc1, doc2}),
+ expected: []NormalValue{NewNormalDocument(doc1), NewNormalDocument(doc2)},
+ },
+ {
+ name: "nillable bool elements",
+ input: NewNormalNillableBoolArray([]immutable.Option[bool]{
+ immutable.Some(true), immutable.Some(false)}),
+ expected: []NormalValue{
+ NewNormalNillableBool(immutable.Some(true)),
+ NewNormalNillableBool(immutable.Some(false)),
+ },
+ },
+ {
+ name: "nillable int elements",
+ input: NewNormalNillableIntArray([]immutable.Option[int64]{
+ immutable.Some(int64(1)), immutable.Some(int64(2))}),
+ expected: []NormalValue{
+ NewNormalNillableInt(immutable.Some(int64(1))),
+ NewNormalNillableInt(immutable.Some(int64(2))),
+ },
+ },
+ {
+ name: "nillable float elements",
+ input: NewNormalNillableFloatArray([]immutable.Option[float64]{
+ immutable.Some(1.0), immutable.Some(2.0)}),
+ expected: []NormalValue{
+ NewNormalNillableFloat(immutable.Some(1.0)),
+ NewNormalNillableFloat(immutable.Some(2.0)),
+ },
+ },
+ {
+ name: "nillable string elements",
+ input: NewNormalNillableStringArray([]immutable.Option[string]{
+ immutable.Some("test"), immutable.Some("test2")}),
+ expected: []NormalValue{
+ NewNormalNillableString(immutable.Some("test")),
+ NewNormalNillableString(immutable.Some("test2")),
+ },
+ },
+ {
+ name: "nillable bytes elements",
+ input: NewNormalNillableBytesArray([]immutable.Option[[]byte]{
+ immutable.Some([]byte{1, 2, 3}), immutable.Some([]byte{4, 5, 6})}),
+ expected: []NormalValue{
+ NewNormalNillableBytes(immutable.Some([]byte{1, 2, 3})),
+ NewNormalNillableBytes(immutable.Some([]byte{4, 5, 6})),
+ },
+ },
+ {
+ name: "nillable time elements",
+ input: NewNormalNillableTimeArray([]immutable.Option[time.Time]{
+ immutable.Some(now), immutable.Some(now)}),
+ expected: []NormalValue{
+ NewNormalNillableTime(immutable.Some(now)),
+ NewNormalNillableTime(immutable.Some(now)),
+ },
+ },
+ {
+ name: "nillable document elements",
+ input: NewNormalNillableDocumentArray([]immutable.Option[*Document]{
+ immutable.Some(doc1), immutable.Some(doc2)}),
+ expected: []NormalValue{
+ NewNormalNillableDocument(immutable.Some(doc1)),
+ NewNormalNillableDocument(immutable.Some(doc2)),
+ },
+ },
+ {
+ name: "nillable array of bool elements",
+ input: NewNormalBoolNillableArray(immutable.Some([]bool{true})),
+ expected: []NormalValue{NewNormalBool(true)},
+ },
+ {
+ name: "nillable array of int elements",
+ input: NewNormalIntNillableArray(immutable.Some([]int64{1})),
+ expected: []NormalValue{NewNormalInt(1)},
+ },
+ {
+ name: "nillable array of float elements",
+ input: NewNormalFloatNillableArray(immutable.Some([]float64{1.0})),
+ expected: []NormalValue{NewNormalFloat(1.0)},
+ },
+ {
+ name: "nillable array of string elements",
+ input: NewNormalStringNillableArray(immutable.Some([]string{"test"})),
+ expected: []NormalValue{NewNormalString("test")},
+ },
+ {
+ name: "nillable array of bytes elements",
+ input: NewNormalBytesNillableArray(immutable.Some([][]byte{{1, 2, 3}})),
+ expected: []NormalValue{NewNormalBytes([]byte{1, 2, 3})},
+ },
+ {
+ name: "nillable array of time elements",
+ input: NewNormalTimeNillableArray(immutable.Some([]time.Time{now})),
+ expected: []NormalValue{NewNormalTime(now)},
+ },
+ {
+ name: "nillable array of document elements",
+ input: NewNormalDocumentNillableArray(immutable.Some([]*Document{doc1})),
+ expected: []NormalValue{NewNormalDocument(doc1)},
+ },
+ {
+ name: "nillable array of nillable bool elements",
+ input: NewNormalNillableBoolNillableArray(
+ immutable.Some([]immutable.Option[bool]{immutable.Some(true)})),
+ expected: []NormalValue{NewNormalNillableBool(immutable.Some(true))},
+ },
+ {
+ name: "nillable array of nillable int elements",
+ input: NewNormalNillableIntNillableArray(
+ immutable.Some([]immutable.Option[int64]{immutable.Some(int64(1))})),
+ expected: []NormalValue{NewNormalNillableInt(immutable.Some(int64(1)))},
+ },
+ {
+ name: "nillable array of nillable float elements",
+ input: NewNormalNillableFloatNillableArray(
+ immutable.Some([]immutable.Option[float64]{immutable.Some(1.0)})),
+ expected: []NormalValue{NewNormalNillableFloat(immutable.Some(1.0))},
+ },
+ {
+ name: "nillable array of nillable string elements",
+ input: NewNormalNillableStringNillableArray(
+ immutable.Some([]immutable.Option[string]{immutable.Some("test")})),
+ expected: []NormalValue{NewNormalNillableString(immutable.Some("test"))},
+ },
+ {
+ name: "nillable array of nillable bytes elements",
+ input: NewNormalNillableBytesNillableArray(
+ immutable.Some([]immutable.Option[[]byte]{immutable.Some([]byte{1, 2, 3})})),
+ expected: []NormalValue{NewNormalNillableBytes(immutable.Some([]byte{1, 2, 3}))},
+ },
+ {
+ name: "nillable array of nillable time elements",
+ input: NewNormalNillableTimeNillableArray(
+ immutable.Some([]immutable.Option[time.Time]{immutable.Some(now)})),
+ expected: []NormalValue{NewNormalNillableTime(immutable.Some(now))},
+ },
+ {
+ name: "nillable array of nillable document elements",
+ input: NewNormalNillableDocumentNillableArray(
+ immutable.Some([]immutable.Option[*Document]{immutable.Some(doc1)})),
+ expected: []NormalValue{NewNormalNillableDocument(immutable.Some(doc1))},
+ },
+ }
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			actual, err := ToArrayOfNormalValues(tt.input)
+			if tt.err != "" {
+				require.ErrorContains(t, err, tt.err)
+				return
+			}
+
+			require.NoError(t, err)
+			assert.Equal(t, tt.expected, actual)
+		})
+	}
+}
+
+// This test documents a bug where array values
+// were not returning the correct value for IsNillable
+// and were also not convertible to a normal nil kind.
+func TestArrayValue_IsNillable(t *testing.T) {
+ fieldKinds := []FieldKind{
+ FieldKind_BOOL_ARRAY,
+ FieldKind_INT_ARRAY,
+ FieldKind_FLOAT_ARRAY,
+ FieldKind_STRING_ARRAY,
+ FieldKind_NILLABLE_BOOL_ARRAY,
+ FieldKind_NILLABLE_INT_ARRAY,
+ FieldKind_NILLABLE_FLOAT_ARRAY,
+ FieldKind_NILLABLE_STRING_ARRAY,
+ }
+
+ for _, kind := range fieldKinds {
+ assert.True(t, kind.IsNillable())
+
+ v, err := NewNormalNil(kind)
+ require.NoError(t, err)
+
+ assert.True(t, v.IsNil())
+ }
+}
diff --git a/client/normal_void.go b/client/normal_void.go
new file mode 100644
index 0000000000..e3e29b5094
--- /dev/null
+++ b/client/normal_void.go
@@ -0,0 +1,205 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package client
+
+import (
+ "time"
+
+ "github.com/sourcenetwork/immutable"
+)
+
+// NormalVoid is a default implementation of NormalValue to be embedded in other types.
+// It can also be used to realize the Null Object pattern: https://en.wikipedia.org/wiki/Null_object_pattern.
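+//
+// A minimal sketch of the pattern (hypothetical type): embed NormalVoid to
+// inherit its no-op implementations, then override only the accessors the
+// concrete type actually supports:
+//
+//	type normalInt struct {
+//		NormalVoid
+//		val int64
+//	}
+//
+//	func (v normalInt) Int() (int64, bool) { return v.val, true }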
+type NormalVoid struct{}
+
+func (NormalVoid) Unwrap() any {
+ return nil
+}
+
+func (NormalVoid) IsNil() bool {
+ return false
+}
+
+func (NormalVoid) IsNillable() bool {
+ return false
+}
+
+func (NormalVoid) Bool() (bool, bool) {
+ return false, false
+}
+
+func (NormalVoid) Int() (int64, bool) {
+ return 0, false
+}
+
+func (NormalVoid) Float() (float64, bool) {
+ return 0, false
+}
+
+func (NormalVoid) String() (string, bool) {
+ return "", false
+}
+
+func (NormalVoid) Bytes() ([]byte, bool) {
+ return nil, false
+}
+
+func (NormalVoid) Time() (time.Time, bool) {
+ return time.Time{}, false
+}
+
+func (NormalVoid) Document() (*Document, bool) {
+ return nil, false
+}
+
+func (NormalVoid) NillableBool() (immutable.Option[bool], bool) {
+ return immutable.None[bool](), false
+}
+
+func (NormalVoid) NillableInt() (immutable.Option[int64], bool) {
+ return immutable.None[int64](), false
+}
+
+func (NormalVoid) NillableFloat() (immutable.Option[float64], bool) {
+ return immutable.None[float64](), false
+}
+
+func (NormalVoid) NillableString() (immutable.Option[string], bool) {
+ return immutable.None[string](), false
+}
+
+func (NormalVoid) NillableBytes() (immutable.Option[[]byte], bool) {
+ return immutable.None[[]byte](), false
+}
+
+func (NormalVoid) NillableTime() (immutable.Option[time.Time], bool) {
+ return immutable.None[time.Time](), false
+}
+
+func (NormalVoid) NillableDocument() (immutable.Option[*Document], bool) {
+ return immutable.None[*Document](), false
+}
+
+func (NormalVoid) IsArray() bool {
+ return false
+}
+
+func (NormalVoid) BoolArray() ([]bool, bool) {
+ return nil, false
+}
+
+func (NormalVoid) IntArray() ([]int64, bool) {
+ return nil, false
+}
+
+func (NormalVoid) FloatArray() ([]float64, bool) {
+ return nil, false
+}
+
+func (NormalVoid) StringArray() ([]string, bool) {
+ return nil, false
+}
+
+func (NormalVoid) BytesArray() ([][]byte, bool) {
+ return nil, false
+}
+
+func (NormalVoid) TimeArray() ([]time.Time, bool) {
+ return nil, false
+}
+
+func (NormalVoid) DocumentArray() ([]*Document, bool) {
+ return nil, false
+}
+
+func (NormalVoid) NillableBoolArray() ([]immutable.Option[bool], bool) {
+ return nil, false
+}
+
+func (NormalVoid) NillableIntArray() ([]immutable.Option[int64], bool) {
+ return nil, false
+}
+
+func (NormalVoid) NillableFloatArray() ([]immutable.Option[float64], bool) {
+ return nil, false
+}
+
+func (NormalVoid) NillableStringArray() ([]immutable.Option[string], bool) {
+ return nil, false
+}
+
+func (NormalVoid) NillableBytesArray() ([]immutable.Option[[]byte], bool) {
+ return nil, false
+}
+
+func (NormalVoid) NillableTimeArray() ([]immutable.Option[time.Time], bool) {
+ return nil, false
+}
+
+func (NormalVoid) NillableDocumentArray() ([]immutable.Option[*Document], bool) {
+ return nil, false
+}
+
+func (NormalVoid) BoolNillableArray() (immutable.Option[[]bool], bool) {
+ return immutable.None[[]bool](), false
+}
+
+func (NormalVoid) IntNillableArray() (immutable.Option[[]int64], bool) {
+ return immutable.None[[]int64](), false
+}
+
+func (NormalVoid) FloatNillableArray() (immutable.Option[[]float64], bool) {
+ return immutable.None[[]float64](), false
+}
+
+func (NormalVoid) StringNillableArray() (immutable.Option[[]string], bool) {
+ return immutable.None[[]string](), false
+}
+
+func (NormalVoid) BytesNillableArray() (immutable.Option[[][]byte], bool) {
+ return immutable.None[[][]byte](), false
+}
+
+func (NormalVoid) TimeNillableArray() (immutable.Option[[]time.Time], bool) {
+ return immutable.None[[]time.Time](), false
+}
+
+func (NormalVoid) DocumentNillableArray() (immutable.Option[[]*Document], bool) {
+ return immutable.None[[]*Document](), false
+}
+
+func (NormalVoid) NillableBoolNillableArray() (immutable.Option[[]immutable.Option[bool]], bool) {
+ return immutable.None[[]immutable.Option[bool]](), false
+}
+
+func (NormalVoid) NillableIntNillableArray() (immutable.Option[[]immutable.Option[int64]], bool) {
+ return immutable.None[[]immutable.Option[int64]](), false
+}
+
+func (NormalVoid) NillableFloatNillableArray() (immutable.Option[[]immutable.Option[float64]], bool) {
+ return immutable.None[[]immutable.Option[float64]](), false
+}
+
+func (NormalVoid) NillableStringNillableArray() (immutable.Option[[]immutable.Option[string]], bool) {
+ return immutable.None[[]immutable.Option[string]](), false
+}
+
+func (NormalVoid) NillableBytesNillableArray() (immutable.Option[[]immutable.Option[[]byte]], bool) {
+ return immutable.None[[]immutable.Option[[]byte]](), false
+}
+
+func (NormalVoid) NillableTimeNillableArray() (immutable.Option[[]immutable.Option[time.Time]], bool) {
+ return immutable.None[[]immutable.Option[time.Time]](), false
+}
+
+func (NormalVoid) NillableDocumentNillableArray() (immutable.Option[[]immutable.Option[*Document]], bool) {
+ return immutable.None[[]immutable.Option[*Document]](), false
+}
diff --git a/client/policy.go b/client/policy.go
new file mode 100644
index 0000000000..5b877696c2
--- /dev/null
+++ b/client/policy.go
@@ -0,0 +1,31 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package client
+
+// PolicyDescription describes a policy. It is made up of a valid policyID that is
+// registered with acp and a valid DPI-compliant resource name that also exists on
+// that policy; the description is already validated.
+type PolicyDescription struct {
+ // ID is the local policyID when using local acp, and global policyID when
+ // using remote acp with sourcehub. This identifier is externally managed
+ // by the acp system.
+ ID string
+
+ // ResourceName is the name of the corresponding resource within the policy.
+ ResourceName string
+}
+
+// AddPolicyResult wraps the result of successfully adding/registering a Policy.
+type AddPolicyResult struct {
+ // PolicyID is the unique identifier returned by the acp system,
+ // upon successful creation of a policy.
+ PolicyID string
+}
diff --git a/client/request/aggregate.go b/client/request/aggregate.go
index 902134b258..fa7188977e 100644
--- a/client/request/aggregate.go
+++ b/client/request/aggregate.go
@@ -10,20 +10,43 @@
package request
-import immutables "github.com/sourcenetwork/immutable"
+import "github.com/sourcenetwork/immutable"
+// Aggregate represents an aggregate operation upon a set of child properties.
+//
+// Which aggregate this represents (e.g. _count, _avg, etc.) is determined by its
+// [Name] property.
type Aggregate struct {
Field
+ // Targets hosts the properties to aggregate.
+ //
+	// When multiple properties are selected, their values will be gathered into a single set
+	// upon which the aggregate will be performed. For example, if this aggregate represents
+	// an average of the Friends.Age and Parents.Age fields, the result will be the average
+	// age of all their friends and parents; it will not be an average of their average ages.
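+	//
+	// For instance, if Friends.Age yields [10, 20] and Parents.Age yields [30], the
+	// result is 20 (the mean of the combined set [10, 20, 30]), not 22.5 (the mean
+	// of the per-relation averages 15 and 30).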
Targets []*AggregateTarget
}
+// AggregateTarget represents the target of an [Aggregate].
type AggregateTarget struct {
- HostName string
- ChildName immutables.Option[string]
+ Limitable
+ Offsetable
+ Orderable
+ Filterable
+
+ // HostName is the name of the immediate field on the object hosting the aggregate.
+ //
+ // For example if averaging Friends.Age on the User collection, this property would be
+ // "Friends".
+ HostName string
- Limit immutables.Option[uint64]
- Offset immutables.Option[uint64]
- OrderBy immutables.Option[OrderBy]
- Filter immutables.Option[Filter]
+ // ChildName is the name of the child field on the object navigated to via [HostName].
+ //
+ // It is optional, for example when counting the number of Friends on User, or when aggregating
+ // scalar arrays, this value will be None.
+ //
+ // When averaging Friends.Age on the User collection, this property would be
+ // "Age".
+ ChildName immutable.Option[string]
}
diff --git a/client/request/cid.go b/client/request/cid.go
new file mode 100644
index 0000000000..42707d0247
--- /dev/null
+++ b/client/request/cid.go
@@ -0,0 +1,25 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package request
+
+import "github.com/sourcenetwork/immutable"
+
+// CIDFilter is an embeddable struct that hosts a consistent set of properties
+// for filtering an aspect of a request by commit CID.
+type CIDFilter struct {
+ // CID is an optional value that selects a single document at the given commit CID
+ // for processing by the request.
+ //
+	// If a commit matching the given CID is not found, an error will be returned. The commit
+ // does not need to be the latest, and this property allows viewing of the document at
+ // prior revisions.
+ CID immutable.Option[string]
+}
diff --git a/client/request/commit.go b/client/request/commit.go
index ff65e20822..e44dabf794 100644
--- a/client/request/commit.go
+++ b/client/request/commit.go
@@ -16,20 +16,34 @@ var (
_ Selection = (*CommitSelect)(nil)
)
+// CommitSelect represents the selection of database commits made to Defra documents.
type CommitSelect struct {
Field
+ ChildSelect
- DocID immutable.Option[string]
- FieldID immutable.Option[string]
- Cid immutable.Option[string]
- Depth immutable.Option[uint64]
+ CIDFilter
+
+ Limitable
+ Offsetable
+ Orderable
+ Groupable
- Limit immutable.Option[uint64]
- Offset immutable.Option[uint64]
- OrderBy immutable.Option[OrderBy]
- GroupBy immutable.Option[GroupBy]
+	// DocID is an optional filter which, when provided, will limit commits to those
+	// belonging to the given document.
+ DocID immutable.Option[string]
+
+	// FieldID is an optional filter which, when provided, will limit commits to those
+	// belonging to the given field.
+ //
+ // `C` may be provided for document-level (composite) commits.
+ FieldID immutable.Option[string]
- Fields []Selection
+	// Depth limits the returned commits to those no more than the given number of
+	// places away from the most current in the history.
+ //
+ // For example if a document has been updated 5 times, and a depth of 2 is provided
+ // only commits for the last two updates will be returned.
+ Depth immutable.Option[uint64]
}
func (c CommitSelect) ToSelect() *Select {
@@ -38,11 +52,10 @@ func (c CommitSelect) ToSelect() *Select {
Name: c.Name,
Alias: c.Alias,
},
- Limit: c.Limit,
- Offset: c.Offset,
- OrderBy: c.OrderBy,
- GroupBy: c.GroupBy,
- Fields: c.Fields,
- Root: CommitSelection,
+ Limitable: c.Limitable,
+ Offsetable: c.Offsetable,
+ Orderable: c.Orderable,
+ Groupable: c.Groupable,
+ ChildSelect: c.ChildSelect,
}
}
diff --git a/client/request/doc_ids.go b/client/request/doc_ids.go
new file mode 100644
index 0000000000..24089d2032
--- /dev/null
+++ b/client/request/doc_ids.go
@@ -0,0 +1,21 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package request
+
+import "github.com/sourcenetwork/immutable"
+
+// DocIDsFilter is an embeddable struct that hosts a consistent set of properties
+// for filtering an aspect of a request by document IDs.
+type DocIDsFilter struct {
+ // DocIDs is an optional value that ensures any records processed by the request
+ // will have one of the given document IDs.
+ DocIDs immutable.Option[[]string]
+}
diff --git a/client/request/field.go b/client/request/field.go
index 578074671b..636a0d97e8 100644
--- a/client/request/field.go
+++ b/client/request/field.go
@@ -14,6 +14,12 @@ import "github.com/sourcenetwork/immutable"
// Field implements Selection
type Field struct {
- Name string
+	// Name contains the name of the field on its host object.
+ //
+ // For example `email` on a `User` collection, or a `_count` aggregate.
+ Name string
+
+	// Alias is an optional override for Name; if provided, results will be returned
+	// from the query using the Alias instead of the Name.
Alias immutable.Option[string]
}
diff --git a/client/request/filter.go b/client/request/filter.go
index 67a80b58e7..aabfafb9b9 100644
--- a/client/request/filter.go
+++ b/client/request/filter.go
@@ -10,6 +10,8 @@
package request
+import "github.com/sourcenetwork/immutable"
+
const (
FilterOpOr = "_or"
FilterOpAnd = "_and"
@@ -24,3 +26,11 @@ type Filter struct {
// parsed filter conditions
Conditions map[string]any
}
+
+// Filterable is an embeddable struct that hosts a consistent set of properties
+// for filtering an aspect of a request.
+type Filterable struct {
+	// Filter is an optional set of conditions used to filter records prior to
+	// being processed by the request.
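+	//
+	// An illustrative request shape (hypothetical collection and field):
+	//
+	//	User(filter: {age: {_gt: 18}}) {...}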
+ Filter immutable.Option[Filter]
+}
diff --git a/client/request/group.go b/client/request/group.go
index e2fd977a00..b38186cb3a 100644
--- a/client/request/group.go
+++ b/client/request/group.go
@@ -10,6 +10,22 @@
package request
+import "github.com/sourcenetwork/immutable"
+
type GroupBy struct {
Fields []string
}
+
+// Groupable is an embeddable struct that hosts a consistent set of properties
+// for grouping an aspect of a request.
+type Groupable struct {
+	// GroupBy is an optional set of fields by which to group the contents of this
+	// request.
+ //
+ // If this argument is provided, only fields used to group may be rendered in
+ // the immediate child selector. Additional fields may be selected by using
+ // the '_group' selector within the immediate child selector. If an empty set
+ // is provided, the restrictions mentioned still apply, although all results
+ // will appear within the same group.
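+	//
+	// An illustrative request shape (hypothetical collection and fields):
+	//
+	//	User(groupBy: [age]) {
+	//		age
+	//		_group {name}
+	//	}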
+ GroupBy immutable.Option[GroupBy]
+}
diff --git a/client/request/limit.go b/client/request/limit.go
new file mode 100644
index 0000000000..2e1b1a4ab7
--- /dev/null
+++ b/client/request/limit.go
@@ -0,0 +1,20 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package request
+
+import "github.com/sourcenetwork/immutable"
+
+// Limitable is an embeddable struct that hosts a consistent set of properties
+// for limiting an aspect of a request.
+type Limitable struct {
+ // Limit is an optional value that caps the number of results to the number provided.
+ Limit immutable.Option[uint64]
+}
diff --git a/client/request/mutation.go b/client/request/mutation.go
index 6bff180dd9..81fcc823c9 100644
--- a/client/request/mutation.go
+++ b/client/request/mutation.go
@@ -10,8 +10,6 @@
package request
-import "github.com/sourcenetwork/immutable"
-
type MutationType int
const (
@@ -25,17 +23,24 @@ const (
// all the possible arguments.
type ObjectMutation struct {
Field
+ ChildSelect
+
+ Filterable
+ DocIDsFilter
+
+	// Type is the type of mutation that this object represents.
+ //
+ // For example [CreateObjects].
Type MutationType
- // Collection is the target collection name
- // if this mutation is on an object.
+ // Collection is the target collection name.
Collection string
- IDs immutable.Option[[]string]
- Filter immutable.Option[Filter]
- Input map[string]any
-
- Fields []Selection
+ // Input is the json representation of the fieldName-value pairs of document properties
+ // to mutate.
+ //
+ // This is ignored for [DeleteObjects] mutations.
+ Input map[string]any
}
// ToSelect returns a basic Select object, with the same Name, Alias, and Fields as
@@ -46,8 +51,8 @@ func (m ObjectMutation) ToSelect() *Select {
Name: m.Collection,
Alias: m.Alias,
},
- Fields: m.Fields,
- DocIDs: m.IDs,
- Filter: m.Filter,
+ ChildSelect: m.ChildSelect,
+ DocIDsFilter: m.DocIDsFilter,
+ Filterable: m.Filterable,
}
}
diff --git a/client/request/offset.go b/client/request/offset.go
new file mode 100644
index 0000000000..5bb2ea723d
--- /dev/null
+++ b/client/request/offset.go
@@ -0,0 +1,22 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package request
+
+import "github.com/sourcenetwork/immutable"
+
+// Offsetable is an embeddable struct that hosts a consistent set of properties
+// for offsetting an aspect of a request.
+type Offsetable struct {
+ // Offset is an optional value that skips the given number of results that would have
+	// otherwise been returned. It is commonly used alongside the limit argument,
+	// but will also work on its own.
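+	//
+	// An illustrative request shape (hypothetical collection):
+	//
+	//	User(limit: 10, offset: 20) {...}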
+ Offset immutable.Option[uint64]
+}
diff --git a/client/request/order.go b/client/request/order.go
index 1fff3953f1..d998843959 100644
--- a/client/request/order.go
+++ b/client/request/order.go
@@ -10,6 +10,8 @@
package request
+import "github.com/sourcenetwork/immutable"
+
type (
OrderDirection string
@@ -29,3 +31,11 @@ type (
Conditions []OrderCondition
}
)
+
+// Orderable is an embeddable struct that hosts a consistent set of properties
+// for ordering an aspect of a request.
+type Orderable struct {
+ // OrderBy is an optional set of field-orders which may be used to sort the results. An
+ // empty set will be ignored.
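+	//
+	// An illustrative request shape (hypothetical collection and field):
+	//
+	//	User(order: {age: ASC}) {...}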
+ OrderBy immutable.Option[OrderBy]
+}
diff --git a/client/request/select.go b/client/request/select.go
index 863bba2aeb..0365fb385b 100644
--- a/client/request/select.go
+++ b/client/request/select.go
@@ -12,16 +12,6 @@ package request
import (
"encoding/json"
-
- "github.com/sourcenetwork/immutable"
-)
-
-// SelectionType is the type of selection.
-type SelectionType int
-
-const (
- ObjectSelection SelectionType = iota
- CommitSelection
)
// Select is a complex Field with strong typing.
@@ -29,22 +19,29 @@ const (
// Includes fields, and request arguments like filters, limits, etc.
type Select struct {
Field
+ ChildSelect
+
+ Limitable
+ Offsetable
+ Orderable
+ Filterable
+ DocIDsFilter
+ CIDFilter
+ Groupable
+
+	// ShowDeleted, if set to true, will cause deleted documents to be returned
+	// along with non-deleted ones.
+ ShowDeleted bool
+}
- DocIDs immutable.Option[[]string]
- CID immutable.Option[string]
-
- // Root is the top level type of parsed request
- Root SelectionType
-
- Limit immutable.Option[uint64]
- Offset immutable.Option[uint64]
- OrderBy immutable.Option[OrderBy]
- GroupBy immutable.Option[GroupBy]
- Filter immutable.Option[Filter]
-
+// ChildSelect represents a type with selectable child properties.
+//
+// At least one child must be selected.
+type ChildSelect struct {
+ // Fields contains the set of child properties to return.
+ //
+ // At least one child property must be selected.
Fields []Selection
-
- ShowDeleted bool
}
// Validate validates the Select.
@@ -111,25 +108,20 @@ func (s *Select) validateGroupBy() []error {
}
// selectJson is a private object used for handling json deserialization
-// of `Select` objects.
+// of [Select] objects.
+//
+// It contains everything minus the [ChildSelect], which uses a custom UnmarshalJSON
+// and is skipped over when embedding due to the way the std lib json pkg works.
type selectJson struct {
Field
- DocIDs immutable.Option[[]string]
- CID immutable.Option[string]
- Root SelectionType
- Limit immutable.Option[uint64]
- Offset immutable.Option[uint64]
- OrderBy immutable.Option[OrderBy]
- GroupBy immutable.Option[GroupBy]
- Filter immutable.Option[Filter]
+ Limitable
+ Offsetable
+ Orderable
+ Filterable
+ DocIDsFilter
+ CIDFilter
+ Groupable
ShowDeleted bool
-
- // Properties above this line match the `Select` object and
- // are deserialized using the normal/default logic.
- // Properties below this line require custom logic in `UnmarshalJSON`
- // in order to be deserialized correctly.
-
- Fields []map[string]json.RawMessage
}
func (s *Select) UnmarshalJSON(bytes []byte) error {
@@ -142,13 +134,37 @@ func (s *Select) UnmarshalJSON(bytes []byte) error {
s.Field = selectMap.Field
s.DocIDs = selectMap.DocIDs
s.CID = selectMap.CID
- s.Root = selectMap.Root
- s.Limit = selectMap.Limit
- s.Offset = selectMap.Offset
- s.OrderBy = selectMap.OrderBy
- s.GroupBy = selectMap.GroupBy
- s.Filter = selectMap.Filter
+ s.Limitable = selectMap.Limitable
+ s.Offsetable = selectMap.Offsetable
+ s.Orderable = selectMap.Orderable
+ s.Groupable = selectMap.Groupable
+ s.Filterable = selectMap.Filterable
s.ShowDeleted = selectMap.ShowDeleted
+
+ var childSelect ChildSelect
+ err = json.Unmarshal(bytes, &childSelect)
+ if err != nil {
+ return err
+ }
+
+ s.ChildSelect = childSelect
+
+ return nil
+}
+
+// childSelectJson is a private object used for handling json deserialization
+// of [ChildSelect] objects.
+type childSelectJson struct {
+ Fields []map[string]json.RawMessage
+}
+
+func (s *ChildSelect) UnmarshalJSON(bytes []byte) error {
+ var selectMap childSelectJson
+ err := json.Unmarshal(bytes, &selectMap)
+ if err != nil {
+ return err
+ }
+
s.Fields = make([]Selection, len(selectMap.Fields))
for i, field := range selectMap.Fields {
@@ -163,8 +179,8 @@ func (s *Select) UnmarshalJSON(bytes []byte) error {
// They must be non-nillable as nil values may have their keys omitted from
// the json. This also relies on the fields being unique. We may wish to change
// this later to custom-serialize with a `_type` property.
- if _, ok := field["Root"]; ok {
- // This must be a Select, as only the `Select` type has a `Root` field
+ if _, ok := field["Fields"]; ok {
+ // This must be a Select, as only the `Select` type has a `Fields` field
var fieldSelect Select
err := json.Unmarshal(fieldJson, &fieldSelect)
if err != nil {
diff --git a/client/request/subscription.go b/client/request/subscription.go
index bb4e01156c..08276e7ef7 100644
--- a/client/request/subscription.go
+++ b/client/request/subscription.go
@@ -19,13 +19,12 @@ import (
// arguments
type ObjectSubscription struct {
Field
+ ChildSelect
+
+ Filterable
// Collection is the target collection name
Collection string
-
- Filter immutable.Option[Filter]
-
- Fields []Selection
}
// ToSelect returns a basic Select object, with the same Name, Alias, and Fields as
@@ -36,9 +35,13 @@ func (m ObjectSubscription) ToSelect(docID, cid string) *Select {
Name: m.Collection,
Alias: m.Alias,
},
- DocIDs: immutable.Some([]string{docID}),
- CID: immutable.Some(cid),
- Fields: m.Fields,
- Filter: m.Filter,
+ DocIDsFilter: DocIDsFilter{
+ DocIDs: immutable.Some([]string{docID}),
+ },
+ CIDFilter: CIDFilter{
+			CID: immutable.Some(cid),
+ },
+ ChildSelect: m.ChildSelect,
+ Filterable: m.Filterable,
}
}
diff --git a/client/schema_description.go b/client/schema_description.go
new file mode 100644
index 0000000000..2d34b131b8
--- /dev/null
+++ b/client/schema_description.go
@@ -0,0 +1,56 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package client
+
+// SchemaDescription describes a Schema and its associated metadata.
+type SchemaDescription struct {
+ // Root is the version agnostic identifier for this schema.
+ //
+ // It remains constant throughout the lifetime of this schema.
+ Root string
+
+ // VersionID is the version-specific identifier for this schema.
+ //
+ // It is generated on mutation of this schema and can be used to uniquely
+ // identify a schema at a specific version.
+ VersionID string
+
+ // Name is the name of this Schema.
+ //
+ // It is currently used to define the Collection Name, and as such these two properties
+ // will currently share the same name.
+ //
+ // It is immutable.
+ Name string
+
+ // Fields contains the fields globally defined across the node network within this Schema.
+ //
+ // Any [CollectionDescription]s that reference this [SchemaDescription] will have a field
+ // set that contains all of these fields, plus any local only fields (such as the secondary side
+ // of a relation).
+ //
+ // Embedded objects (including within Views) are schema-only, and as such fields of embedded
+ // objects will not have a corresponding [CollectionFieldDescription].
+ //
+ // Currently new fields may be added after initial declaration, but they cannot be removed.
+ Fields []SchemaFieldDescription
+}
+
+// GetFieldByName returns the field for the given field name. If such a field is
+// found it will return it and true; otherwise it will return false.
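+//
+// A sketch of typical usage (hypothetical schema variable and field name):
+//
+//	if field, ok := schema.GetFieldByName("email"); ok {
+//		// use field.Kind, field.Typ, etc.
+//	}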
+func (s SchemaDescription) GetFieldByName(fieldName string) (SchemaFieldDescription, bool) {
+ for _, field := range s.Fields {
+ if field.Name == fieldName {
+ return field, true
+ }
+ }
+ return SchemaFieldDescription{}, false
+}
diff --git a/client/schema_field_description.go b/client/schema_field_description.go
new file mode 100644
index 0000000000..87ee843ec8
--- /dev/null
+++ b/client/schema_field_description.go
@@ -0,0 +1,343 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package client
+
+import (
+ "encoding/json"
+ "strconv"
+ "strings"
+)
+
+// FieldKind describes the type of a field.
+type FieldKind interface {
+ // String returns the string representation of this FieldKind.
+ String() string
+
+ // Underlying returns the underlying Kind as a string.
+ //
+ // If this is an array, it will return the element kind, else it will return the same as
+ // [String()].
+ Underlying() string
+
+ // IsNillable returns true if this kind supports nil values.
+ IsNillable() bool
+
+ // IsObject returns true if this FieldKind is an object type, or an array of object types.
+ IsObject() bool
+
+ // IsObjectArray returns true if this FieldKind is an object array type.
+ IsObjectArray() bool
+
+	// IsArray returns true if this FieldKind is an array type, which includes inline
+	// arrays as well as relation arrays.
+ IsArray() bool
+}
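+
+// As a rough illustration, using the concrete kinds declared later in this file:
+//
+//	FieldKind_INT_ARRAY.String()     // "[Int!]"
+//	FieldKind_INT_ARRAY.Underlying() // "Int!"
+//	FieldKind_INT_ARRAY.IsArray()    // true
+//	FieldKind_DocID.IsNillable()     // false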
+
+// SchemaFieldDescription describes a field on a Schema and its associated metadata.
+type SchemaFieldDescription struct {
+ // Name contains the name of this field.
+ //
+ // It is currently immutable.
+ Name string
+
+ // The data type that this field holds.
+ //
+ // Must contain a valid value. It is currently immutable.
+ Kind FieldKind
+
+ // The CRDT Type of this field. If no type has been provided it will default to [LWW_REGISTER].
+ //
+ // It is currently immutable.
+ Typ CType
+}
+
+// ScalarKind represents singular scalar field kinds, such as `Int`.
+type ScalarKind uint8
+
+// ScalarArrayKind represents arrays of simple scalar field kinds, such as `[Int]`.
+type ScalarArrayKind uint8
+
+// ObjectKind represents singular objects (foreign and embedded), such as `User`.
+type ObjectKind string
+
+// ObjectArrayKind represents arrays of objects (foreign and embedded), such as `[User]`.
+type ObjectArrayKind string
+
+var _ FieldKind = ScalarKind(0)
+var _ FieldKind = ScalarArrayKind(0)
+var _ FieldKind = ObjectKind("")
+var _ FieldKind = ObjectArrayKind("")
+
+func (k ScalarKind) String() string {
+ switch k {
+ case FieldKind_DocID:
+ return "ID"
+ case FieldKind_NILLABLE_BOOL:
+ return "Boolean"
+ case FieldKind_NILLABLE_INT:
+ return "Int"
+ case FieldKind_NILLABLE_DATETIME:
+ return "DateTime"
+ case FieldKind_NILLABLE_FLOAT:
+ return "Float"
+ case FieldKind_NILLABLE_STRING:
+ return "String"
+ case FieldKind_NILLABLE_BLOB:
+ return "Blob"
+ case FieldKind_NILLABLE_JSON:
+ return "JSON"
+ default:
+ return strconv.Itoa(int(k))
+ }
+}
+
+func (k ScalarKind) Underlying() string {
+ return k.String()
+}
+
+func (k ScalarKind) IsNillable() bool {
+ return k != FieldKind_DocID
+}
+
+func (k ScalarKind) IsObject() bool {
+ return false
+}
+
+func (k ScalarKind) IsObjectArray() bool {
+ return false
+}
+
+func (k ScalarKind) IsArray() bool {
+ return false
+}
+
+func (k ScalarArrayKind) String() string {
+ switch k {
+ case FieldKind_NILLABLE_BOOL_ARRAY:
+ return "[Boolean]"
+ case FieldKind_BOOL_ARRAY:
+ return "[Boolean!]"
+ case FieldKind_NILLABLE_INT_ARRAY:
+ return "[Int]"
+ case FieldKind_INT_ARRAY:
+ return "[Int!]"
+ case FieldKind_NILLABLE_FLOAT_ARRAY:
+ return "[Float]"
+ case FieldKind_FLOAT_ARRAY:
+ return "[Float!]"
+ case FieldKind_NILLABLE_STRING_ARRAY:
+ return "[String]"
+ case FieldKind_STRING_ARRAY:
+ return "[String!]"
+ default:
+ return strconv.Itoa(int(k))
+ }
+}
+
+func (k ScalarArrayKind) Underlying() string {
+ return strings.Trim(k.String(), "[]")
+}
+
+func (k ScalarArrayKind) IsNillable() bool {
+ return true
+}
+
+func (k ScalarArrayKind) IsObject() bool {
+ return false
+}
+
+func (k ScalarArrayKind) IsObjectArray() bool {
+ return false
+}
+
+func (k ScalarArrayKind) IsArray() bool {
+ return true
+}
+
+func (k ObjectKind) String() string {
+ return string(k)
+}
+
+func (k ObjectKind) Underlying() string {
+ return k.String()
+}
+
+func (k ObjectKind) IsNillable() bool {
+ return true
+}
+
+func (k ObjectKind) IsObject() bool {
+ return true
+}
+
+func (k ObjectKind) IsObjectArray() bool {
+ return false
+}
+
+func (k ObjectKind) IsArray() bool {
+ return false
+}
+
+func (k ObjectArrayKind) String() string {
+ return "[" + string(k) + "]"
+}
+
+func (k ObjectArrayKind) Underlying() string {
+ return strings.Trim(k.String(), "[]")
+}
+
+func (k ObjectArrayKind) IsNillable() bool {
+ return true
+}
+
+func (k ObjectArrayKind) IsObject() bool {
+ return true
+}
+
+func (k ObjectArrayKind) IsObjectArray() bool {
+ return true
+}
+
+func (k ObjectArrayKind) IsArray() bool {
+ return true
+}
+
+func (k ObjectArrayKind) MarshalJSON() ([]byte, error) {
+ return []byte(`"` + k.String() + `"`), nil
+}
+
+// Note: These values are serialized and persisted in the database; avoid modifying existing values.
+const (
+ FieldKind_None ScalarKind = 0
+ FieldKind_DocID ScalarKind = 1
+ FieldKind_NILLABLE_BOOL ScalarKind = 2
+ FieldKind_BOOL_ARRAY ScalarArrayKind = 3
+ FieldKind_NILLABLE_INT ScalarKind = 4
+ FieldKind_INT_ARRAY ScalarArrayKind = 5
+ FieldKind_NILLABLE_FLOAT ScalarKind = 6
+ FieldKind_FLOAT_ARRAY ScalarArrayKind = 7
+ _ ScalarKind = 8 // safe to repurpose (was never used)
+ _ ScalarKind = 9 // safe to repurpose (previously old field)
+ FieldKind_NILLABLE_DATETIME ScalarKind = 10
+ FieldKind_NILLABLE_STRING ScalarKind = 11
+ FieldKind_STRING_ARRAY ScalarArrayKind = 12
+ FieldKind_NILLABLE_BLOB ScalarKind = 13
+ FieldKind_NILLABLE_JSON ScalarKind = 14
+ _ ScalarKind = 15 // safe to repurpose (was never used)
+ _ ScalarKind = 16 // Deprecated 2024-03-15, was FieldKind_FOREIGN_OBJECT
+ _ ScalarKind = 17 // Deprecated 2024-03-15, was FieldKind_FOREIGN_OBJECT_ARRAY
+ FieldKind_NILLABLE_BOOL_ARRAY ScalarArrayKind = 18
+ FieldKind_NILLABLE_INT_ARRAY ScalarArrayKind = 19
+ FieldKind_NILLABLE_FLOAT_ARRAY ScalarArrayKind = 20
+ FieldKind_NILLABLE_STRING_ARRAY ScalarArrayKind = 21
+)
+
+// FieldKindStringToEnumMapping maps string representations of [FieldKind] values to
+// their enum values.
+//
+// It is currently used by [db.PatchSchema] to allow string representations of
+// [FieldKind] to be provided instead of their raw int values. This usage may expand
+// in the future. The keys currently correspond roughly to the GQL field types, but
+// this correspondence is not guaranteed.
+var FieldKindStringToEnumMapping = map[string]FieldKind{
+ "ID": FieldKind_DocID,
+ "Boolean": FieldKind_NILLABLE_BOOL,
+ "[Boolean]": FieldKind_NILLABLE_BOOL_ARRAY,
+ "[Boolean!]": FieldKind_BOOL_ARRAY,
+ "Int": FieldKind_NILLABLE_INT,
+ "[Int]": FieldKind_NILLABLE_INT_ARRAY,
+ "[Int!]": FieldKind_INT_ARRAY,
+ "DateTime": FieldKind_NILLABLE_DATETIME,
+ "Float": FieldKind_NILLABLE_FLOAT,
+ "[Float]": FieldKind_NILLABLE_FLOAT_ARRAY,
+ "[Float!]": FieldKind_FLOAT_ARRAY,
+ "String": FieldKind_NILLABLE_STRING,
+ "[String]": FieldKind_NILLABLE_STRING_ARRAY,
+ "[String!]": FieldKind_STRING_ARRAY,
+ "Blob": FieldKind_NILLABLE_BLOB,
+ "JSON": FieldKind_NILLABLE_JSON,
+}
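+
+// For example (illustrative only), resolving a string representation:
+//
+//	kind, ok := FieldKindStringToEnumMapping["[Int]"]
+//	// ok == true, kind == FieldKind_NILLABLE_INT_ARRAY, kind.Underlying() == "Int"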
+
+// IsRelation returns true if this field is a relation.
+func (f SchemaFieldDescription) IsRelation() bool {
+ return f.Kind.IsObject()
+}
+
+// schemaFieldDescription is a private type used to facilitate the unmarshalling
+// of json to a [SchemaFieldDescription].
+type schemaFieldDescription struct {
+ Name string
+ Typ CType
+
+ // Properties below this line are unmarshalled using custom logic in [UnmarshalJSON]
+ Kind json.RawMessage
+}
+
+func (f *SchemaFieldDescription) UnmarshalJSON(bytes []byte) error {
+ var descMap schemaFieldDescription
+ err := json.Unmarshal(bytes, &descMap)
+ if err != nil {
+ return err
+ }
+
+ f.Name = descMap.Name
+ f.Typ = descMap.Typ
+ f.Kind, err = parseFieldKind(descMap.Kind)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func parseFieldKind(bytes json.RawMessage) (FieldKind, error) {
+ if len(bytes) == 0 {
+ return FieldKind_None, nil
+ }
+
+ if bytes[0] != '"' {
+ // If the Kind is not represented by a string, try to parse it as an int, as
+ // that is the only other type we support.
+ var intKind uint8
+ err := json.Unmarshal(bytes, &intKind)
+ if err != nil {
+ return nil, err
+ }
+ switch intKind {
+ case uint8(FieldKind_BOOL_ARRAY), uint8(FieldKind_INT_ARRAY), uint8(FieldKind_FLOAT_ARRAY),
+ uint8(FieldKind_STRING_ARRAY), uint8(FieldKind_NILLABLE_BOOL_ARRAY), uint8(FieldKind_NILLABLE_INT_ARRAY),
+ uint8(FieldKind_NILLABLE_FLOAT_ARRAY), uint8(FieldKind_NILLABLE_STRING_ARRAY):
+ return ScalarArrayKind(intKind), nil
+ default:
+ return ScalarKind(intKind), nil
+ }
+ }
+
+ var strKind string
+ err := json.Unmarshal(bytes, &strKind)
+ if err != nil {
+ return nil, err
+ }
+
+ kind, ok := FieldKindStringToEnumMapping[strKind]
+ if ok {
+ return kind, nil
+ }
+
+ // If we don't find the string representation of this type in the
+ // scalar mapping, assume it is an object - if it is not, validation
+ // will catch this later. If the kind is unknown we have no way of telling
+ // whether the user intended a scalar or an object anyway.
+ if strKind[0] == '[' {
+ return ObjectArrayKind(strings.Trim(strKind, "[]")), nil
+ }
+ return ObjectKind(strKind), nil
+}
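+
+// Sketching the fallback behaviour above (illustrative only):
+//
+//	k, _ := parseFieldKind(json.RawMessage(`"Int"`))    // FieldKind_NILLABLE_INT
+//	k, _ = parseFieldKind(json.RawMessage(`"[User]"`))  // ObjectArrayKind("User")
+//	k, _ = parseFieldKind(json.RawMessage(`"User"`))    // ObjectKind("User")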
diff --git a/client/value.go b/client/value.go
index 261535d8d2..bc84205cd9 100644
--- a/client/value.go
+++ b/client/value.go
@@ -17,11 +17,11 @@ import (
type FieldValue struct {
t CType
- value any
+ value NormalValue
isDirty bool
}
-func NewFieldValue(t CType, val any) *FieldValue {
+func NewFieldValue(t CType, val NormalValue) *FieldValue {
return &FieldValue{
t: t,
value: val,
@@ -30,6 +30,10 @@ func NewFieldValue(t CType, val any) *FieldValue {
}
func (val FieldValue) Value() any {
+ return val.value.Unwrap()
+}
+
+func (val FieldValue) NormalValue() NormalValue {
return val.value
}
@@ -38,7 +42,7 @@ func (val FieldValue) Type() CType {
}
func (val FieldValue) IsDocument() bool {
- _, ok := val.value.(*Document)
+ _, ok := val.value.Document()
return ok
}
@@ -62,30 +66,27 @@ func (val FieldValue) Bytes() ([]byte, error) {
}
var value any
- switch tempVal := val.value.(type) {
- case []immutable.Option[string]:
- value = convertImmutable(tempVal)
- case []immutable.Option[int64]:
- value = convertImmutable(tempVal)
- case []immutable.Option[float64]:
- value = convertImmutable(tempVal)
- case []immutable.Option[bool]:
- value = convertImmutable(tempVal)
- default:
- value = val.value
+ if v, ok := val.value.NillableStringArray(); ok {
+ value = convertImmutable(v)
+ } else if v, ok := val.value.NillableIntArray(); ok {
+ value = convertImmutable(v)
+ } else if v, ok := val.value.NillableFloatArray(); ok {
+ value = convertImmutable(v)
+ } else if v, ok := val.value.NillableBoolArray(); ok {
+ value = convertImmutable(v)
+ } else {
+ value = val.value.Unwrap()
}
return em.Marshal(value)
}
func convertImmutable[T any](vals []immutable.Option[T]) []any {
- var out []any
- for _, val := range vals {
- if !val.HasValue() {
- out = append(out, nil)
- continue
+ out := make([]any, len(vals))
+ for i := range vals {
+ if vals[i].HasValue() {
+ out[i] = vals[i].Value()
}
- out = append(out, val.Value())
}
return out
}
diff --git a/core/crdt/pncounter.go b/core/crdt/counter.go
similarity index 56%
rename from core/crdt/pncounter.go
rename to core/crdt/counter.go
index 7d8b02c1a4..01ca3cf0da 100644
--- a/core/crdt/pncounter.go
+++ b/core/crdt/counter.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Democratized Data Foundation
+// Copyright 2024 Democratized Data Foundation
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
@@ -33,18 +33,18 @@ import (
var (
// ensure types implements core interfaces
- _ core.ReplicatedData = (*PNCounter[float64])(nil)
- _ core.ReplicatedData = (*PNCounter[int64])(nil)
- _ core.Delta = (*PNCounterDelta[float64])(nil)
- _ core.Delta = (*PNCounterDelta[int64])(nil)
+ _ core.ReplicatedData = (*Counter[float64])(nil)
+ _ core.ReplicatedData = (*Counter[int64])(nil)
+ _ core.Delta = (*CounterDelta[float64])(nil)
+ _ core.Delta = (*CounterDelta[int64])(nil)
)
type Incrementable interface {
constraints.Integer | constraints.Float
}
-// PNCounterDelta is a single delta operation for an PNCounter
-type PNCounterDelta[T Incrementable] struct {
+// CounterDelta is a single delta operation for a Counter
+type CounterDelta[T Incrementable] struct {
DocID []byte
FieldName string
Priority uint64
@@ -59,17 +59,17 @@ type PNCounterDelta[T Incrementable] struct {
}
// GetPriority gets the current priority for this delta.
-func (delta *PNCounterDelta[T]) GetPriority() uint64 {
+func (delta *CounterDelta[T]) GetPriority() uint64 {
return delta.Priority
}
// SetPriority will set the priority for this delta.
-func (delta *PNCounterDelta[T]) SetPriority(prio uint64) {
+func (delta *CounterDelta[T]) SetPriority(prio uint64) {
delta.Priority = prio
}
// Marshal encodes the delta using CBOR.
-func (delta *PNCounterDelta[T]) Marshal() ([]byte, error) {
+func (delta *CounterDelta[T]) Marshal() ([]byte, error) {
h := &codec.CborHandle{}
buf := bytes.NewBuffer(nil)
enc := codec.NewEncoder(buf, h)
@@ -81,44 +81,50 @@ func (delta *PNCounterDelta[T]) Marshal() ([]byte, error) {
}
// Unmarshal decodes the delta from CBOR.
-func (delta *PNCounterDelta[T]) Unmarshal(b []byte) error {
+func (delta *CounterDelta[T]) Unmarshal(b []byte) error {
h := &codec.CborHandle{}
dec := codec.NewDecoderBytes(b, h)
return dec.Decode(delta)
}
-// PNCounter, is a simple CRDT type that allows increment/decrement
+// Counter is a simple CRDT type that allows increment/decrement
// of Int and Float data types, ensuring convergence.
-type PNCounter[T Incrementable] struct {
+type Counter[T Incrementable] struct {
baseCRDT
+ AllowDecrement bool
}
-// NewPNCounter returns a new instance of the PNCounter with the given ID.
-func NewPNCounter[T Incrementable](
+// NewCounter returns a new instance of the Counter with the given ID.
+func NewCounter[T Incrementable](
store datastore.DSReaderWriter,
schemaVersionKey core.CollectionSchemaVersionKey,
key core.DataStoreKey,
fieldName string,
-) PNCounter[T] {
- return PNCounter[T]{newBaseCRDT(store, key, schemaVersionKey, fieldName)}
+ allowDecrement bool,
+) Counter[T] {
+ return Counter[T]{newBaseCRDT(store, key, schemaVersionKey, fieldName), allowDecrement}
}
-// Value gets the current register value
-func (reg PNCounter[T]) Value(ctx context.Context) ([]byte, error) {
- valueK := reg.key.WithValueFlag()
- buf, err := reg.store.Get(ctx, valueK.ToDS())
+// Value gets the current counter value
+func (c Counter[T]) Value(ctx context.Context) ([]byte, error) {
+ valueK := c.key.WithValueFlag()
+ buf, err := c.store.Get(ctx, valueK.ToDS())
if err != nil {
return nil, err
}
return buf, nil
}
-// Set generates a new delta with the supplied value
-func (reg PNCounter[T]) Increment(ctx context.Context, value T) (*PNCounterDelta[T], error) {
+// Increment generates a new delta with the supplied value.
+//
+// WARNING: Incrementing an integer and causing it to overflow the int64 max value
+// will cause the value to roll over to the int64 min value. Incrementing a float and
+// causing it to overflow the float64 max value will act like a no-op.
+func (c Counter[T]) Increment(ctx context.Context, value T) (*CounterDelta[T], error) {
// To ensure that the dag block is unique, we add a random number to the delta.
// This is done only on update (if the doc doesn't already exist) to ensure that the
// initial dag block of a document can be reproducible.
- exists, err := reg.store.Has(ctx, reg.key.ToPrimaryDataStoreKey().ToDS())
+ exists, err := c.store.Has(ctx, c.key.ToPrimaryDataStoreKey().ToDS())
if err != nil {
return nil, err
}
@@ -131,29 +137,32 @@ func (reg PNCounter[T]) Increment(ctx context.Context, value T) (*PNCounterDelta
nonce = r.Int64()
}
- return &PNCounterDelta[T]{
- DocID: []byte(reg.key.DocID),
- FieldName: reg.fieldName,
+ return &CounterDelta[T]{
+ DocID: []byte(c.key.DocID),
+ FieldName: c.fieldName,
Data: value,
- SchemaVersionID: reg.schemaVersionKey.SchemaVersionId,
+ SchemaVersionID: c.schemaVersionKey.SchemaVersionId,
Nonce: nonce,
}, nil
}
// Merge implements ReplicatedData interface.
-// It merges two PNCounterRegisty by adding the values together.
-func (reg PNCounter[T]) Merge(ctx context.Context, delta core.Delta) error {
- d, ok := delta.(*PNCounterDelta[T])
+// It merges two Counters by adding their values together.
+func (c Counter[T]) Merge(ctx context.Context, delta core.Delta) error {
+ d, ok := delta.(*CounterDelta[T])
if !ok {
return ErrMismatchedMergeType
}
- return reg.incrementValue(ctx, d.Data, d.GetPriority())
+ return c.incrementValue(ctx, d.Data, d.GetPriority())
}
-func (reg PNCounter[T]) incrementValue(ctx context.Context, value T, priority uint64) error {
- key := reg.key.WithValueFlag()
- marker, err := reg.store.Get(ctx, reg.key.ToPrimaryDataStoreKey().ToDS())
+func (c Counter[T]) incrementValue(ctx context.Context, value T, priority uint64) error {
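+ // A P-Counter (AllowDecrement == false) rejects negative deltas outright;
+ // a PN-Counter accepts them and may decrement the stored value.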
+ if !c.AllowDecrement && value < 0 {
+ return NewErrNegativeValue(value)
+ }
+ key := c.key.WithValueFlag()
+ marker, err := c.store.Get(ctx, c.key.ToPrimaryDataStoreKey().ToDS())
if err != nil && !errors.Is(err, ds.ErrNotFound) {
return err
}
@@ -161,7 +170,7 @@ func (reg PNCounter[T]) incrementValue(ctx context.Context, value T, priority ui
key = key.WithDeletedFlag()
}
- curValue, err := reg.getCurrentValue(ctx, key)
+ curValue, err := c.getCurrentValue(ctx, key)
if err != nil {
return err
}
@@ -172,16 +181,16 @@ func (reg PNCounter[T]) incrementValue(ctx context.Context, value T, priority ui
return err
}
- err = reg.store.Put(ctx, key.ToDS(), b)
+ err = c.store.Put(ctx, key.ToDS(), b)
if err != nil {
return NewErrFailedToStoreValue(err)
}
- return reg.setPriority(ctx, reg.key, priority)
+ return c.setPriority(ctx, c.key, priority)
}
-func (reg PNCounter[T]) getCurrentValue(ctx context.Context, key core.DataStoreKey) (T, error) {
- curValue, err := reg.store.Get(ctx, key.ToDS())
+func (c Counter[T]) getCurrentValue(ctx context.Context, key core.DataStoreKey) (T, error) {
+ curValue, err := c.store.Get(ctx, key.ToDS())
if err != nil {
if errors.Is(err, ds.ErrNotFound) {
return 0, nil
@@ -192,14 +201,14 @@ func (reg PNCounter[T]) getCurrentValue(ctx context.Context, key core.DataStoreK
return getNumericFromBytes[T](curValue)
}
-// DeltaDecode is a typed helper to extract a PNCounterDelta from a ipld.Node
-func (reg PNCounter[T]) DeltaDecode(node ipld.Node) (core.Delta, error) {
+// DeltaDecode is a typed helper to extract a CounterDelta from an ipld.Node
+func (c Counter[T]) DeltaDecode(node ipld.Node) (core.Delta, error) {
pbNode, ok := node.(*dag.ProtoNode)
if !ok {
return nil, client.NewErrUnexpectedType[*dag.ProtoNode]("ipld.Node", node)
}
- delta := &PNCounterDelta[T]{}
+ delta := &CounterDelta[T]{}
err := delta.Unmarshal(pbNode.Data())
if err != nil {
return nil, err
@@ -208,6 +217,13 @@ func (reg PNCounter[T]) DeltaDecode(node ipld.Node) (core.Delta, error) {
return delta, nil
}
+func (c Counter[T]) CType() client.CType {
+ if c.AllowDecrement {
+ return client.PN_COUNTER
+ }
+ return client.P_COUNTER
+}
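+
+// For instance (a usage sketch, not part of this change; `store`,
+// `schemaVersionKey` and `key` are assumed to be in scope):
+//
+//	c := NewCounter[int64](store, schemaVersionKey, key, "points", false)
+//	c.CType() // client.P_COUNTER; merging a negative delta fails with ErrNegativeValue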
+
func getNumericFromBytes[T Incrementable](b []byte) (T, error) {
var val T
err := cbor.Unmarshal(b, &val)
diff --git a/core/crdt/errors.go b/core/crdt/errors.go
index e1148d1044..75af579850 100644
--- a/core/crdt/errors.go
+++ b/core/crdt/errors.go
@@ -17,6 +17,7 @@ import (
const (
errFailedToGetPriority string = "failed to get priority"
errFailedToStoreValue string = "failed to store value"
+ errNegativeValue string = "value cannot be negative"
)
// Errors returnable from this package.
@@ -26,6 +27,7 @@ const (
var (
ErrFailedToGetPriority = errors.New(errFailedToGetPriority)
ErrFailedToStoreValue = errors.New(errFailedToStoreValue)
+ ErrNegativeValue = errors.New(errNegativeValue)
ErrEncodingPriority = errors.New("error encoding priority")
ErrDecodingPriority = errors.New("error decoding priority")
// ErrMismatchedMergeType - Tying to merge two ReplicatedData of different types
@@ -41,3 +43,7 @@ func NewErrFailedToGetPriority(inner error) error {
func NewErrFailedToStoreValue(inner error) error {
return errors.Wrap(errFailedToStoreValue, inner)
}
+
+func NewErrNegativeValue[T Incrementable](value T) error {
+ return errors.New(errNegativeValue, errors.NewKV("Value", value))
+}
diff --git a/core/encoding.go b/core/encoding.go
index 40e74915b8..eab401c7a2 100644
--- a/core/encoding.go
+++ b/core/encoding.go
@@ -17,7 +17,6 @@ import (
"github.com/sourcenetwork/immutable"
"github.com/sourcenetwork/defradb/client"
- "github.com/sourcenetwork/defradb/client/request"
"github.com/sourcenetwork/defradb/encoding"
)
@@ -245,16 +244,18 @@ func DecodeIndexDataStoreKey(
i := len(key.Fields)
descending := false
+ var kind client.FieldKind = client.FieldKind_DocID
// If the key has more values encoded than fields on the index description, the last
// value must be the docID and we treat it as a string.
if i < len(indexDesc.Fields) {
descending = indexDesc.Fields[i].Descending
+ kind = fields[i].Kind
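+ // Knowing the field kind lets DecodeFieldValue produce a typed
+ // NormalValue directly, replacing the separate normalization pass
+ // that previously ran after decoding.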
} else if i > len(indexDesc.Fields) {
return IndexDataStoreKey{}, ErrInvalidKey
}
- var val any
- data, val, err = encoding.DecodeFieldValue(data, descending)
+ var val client.NormalValue
+ data, val, err = encoding.DecodeFieldValue(data, descending, kind)
if err != nil {
return IndexDataStoreKey{}, err
}
@@ -262,34 +263,7 @@ func DecodeIndexDataStoreKey(
key.Fields = append(key.Fields, IndexedField{Value: val, Descending: descending})
}
- err = normalizeIndexDataStoreKeyValues(&key, fields)
- return key, err
-}
-
-// normalizeIndexDataStoreKeyValues converts all field values to standardized
-// Defra Go type according to fields description.
-func normalizeIndexDataStoreKeyValues(key *IndexDataStoreKey, fields []client.FieldDefinition) error {
- for i := range key.Fields {
- if key.Fields[i].Value == nil {
- continue
- }
- var err error
- var val any
- if i == len(key.Fields)-1 && len(key.Fields)-len(fields) == 1 {
- bytes, ok := key.Fields[i].Value.([]byte)
- if !ok {
- return client.NewErrUnexpectedType[[]byte](request.DocIDArgName, key.Fields[i].Value)
- }
- val = string(bytes)
- } else {
- val, err = NormalizeFieldValue(fields[i], key.Fields[i].Value)
- }
- if err != nil {
- return err
- }
- key.Fields[i].Value = val
- }
- return nil
+ return key, nil
}
// EncodeIndexDataStoreKey encodes a IndexDataStoreKey to bytes to be stored as a key
diff --git a/core/errors.go b/core/errors.go
index 440e5778ac..d9ae72e0c4 100644
--- a/core/errors.go
+++ b/core/errors.go
@@ -17,6 +17,7 @@ import (
const (
errFailedToGetFieldIdOfKey string = "failed to get FieldID of Key"
errInvalidFieldIndex string = "invalid field index"
+ errInvalidFieldValue string = "invalid field value"
)
var (
@@ -24,6 +25,7 @@ var (
ErrEmptyKey = errors.New("received empty key string")
ErrInvalidKey = errors.New("invalid key string")
ErrInvalidFieldIndex = errors.New(errInvalidFieldIndex)
+ ErrInvalidFieldValue = errors.New(errInvalidFieldValue)
)
// NewErrFailedToGetFieldIdOfKey returns the error indicating failure to get FieldID of Key.
@@ -35,3 +37,8 @@ func NewErrFailedToGetFieldIdOfKey(inner error) error {
func NewErrInvalidFieldIndex(i int) error {
return errors.New(errInvalidFieldIndex, errors.NewKV("index", i))
}
+
+// NewErrInvalidFieldValue returns the error indicating invalid field value.
+func NewErrInvalidFieldValue(reason string) error {
+ return errors.New(errInvalidFieldValue, errors.NewKV("Reason", reason))
+}
diff --git a/core/key.go b/core/key.go
index 4017d445b0..69b19efb6e 100644
--- a/core/key.go
+++ b/core/key.go
@@ -43,7 +43,8 @@ const (
)
const (
- COLLECTION = "/collection/id"
+ COLLECTION = "collection"
+ COLLECTION_ID = "/collection/id"
COLLECTION_NAME = "/collection/name"
COLLECTION_SCHEMA_VERSION = "/collection/version"
COLLECTION_INDEX = "/collection/index"
@@ -79,7 +80,7 @@ var _ Key = (*DataStoreKey)(nil)
// value of a field in an index.
type IndexedField struct {
// Value is the value of the field in the index
- Value any
+ Value client.NormalValue
// Descending is true if the field is sorted in descending order
Descending bool
}
@@ -326,7 +327,7 @@ func NewCollectionIndexKey(colID immutable.Option[uint32], indexName string) Col
// Where [IndexName] might be omitted. Anything else will return an error.
func NewCollectionIndexKeyFromString(key string) (CollectionIndexKey, error) {
keyArr := strings.Split(key, "/")
- if len(keyArr) < 4 || len(keyArr) > 5 || keyArr[1] != "collection" || keyArr[2] != "index" {
+ if len(keyArr) < 4 || len(keyArr) > 5 || keyArr[1] != COLLECTION || keyArr[2] != "index" {
return CollectionIndexKey{}, ErrInvalidKey
}
@@ -564,7 +565,7 @@ func (k PrimaryDataStoreKey) ToString() string {
}
func (k CollectionKey) ToString() string {
- return fmt.Sprintf("%s/%s", COLLECTION, strconv.Itoa(int(k.CollectionID)))
+ return fmt.Sprintf("%s/%s", COLLECTION_ID, strconv.Itoa(int(k.CollectionID)))
}
func (k CollectionKey) Bytes() []byte {
diff --git a/core/key_test.go b/core/key_test.go
index 3fa7f41a63..7791075a17 100644
--- a/core/key_test.go
+++ b/core/key_test.go
@@ -220,26 +220,26 @@ func TestIndexDatastoreKey_Bytes(t *testing.T) {
Name: "collection, index and one field",
CollectionID: 1,
IndexID: 2,
- Fields: []IndexedField{{Value: 5}},
+ Fields: []IndexedField{{Value: client.NewNormalInt(5)}},
Expected: encodeKey(1, 2, 5, false),
},
{
Name: "collection, index and two fields",
CollectionID: 1,
IndexID: 2,
- Fields: []IndexedField{{Value: 5}, {Value: 7}},
+ Fields: []IndexedField{{Value: client.NewNormalInt(5)}, {Value: client.NewNormalInt(7)}},
Expected: encodeKey(1, 2, 5, false, 7, false),
},
{
Name: "no index",
CollectionID: 1,
- Fields: []IndexedField{{Value: 5}},
+ Fields: []IndexedField{{Value: client.NewNormalInt(5)}},
Expected: encoding.EncodeUvarintAscending([]byte{'/'}, 1),
},
{
Name: "no collection",
IndexID: 2,
- Fields: []IndexedField{{Value: 5}},
+ Fields: []IndexedField{{Value: client.NewNormalInt(5)}},
Expected: []byte{},
},
}
@@ -255,12 +255,12 @@ func TestIndexDatastoreKey_Bytes(t *testing.T) {
}
func TestIndexDatastoreKey_ToString(t *testing.T) {
- key := NewIndexDataStoreKey(1, 2, []IndexedField{{Value: 5}})
+ key := NewIndexDataStoreKey(1, 2, []IndexedField{{Value: client.NewNormalInt(5)}})
assert.Equal(t, key.ToString(), string(encodeKey(1, 2, 5, false)))
}
func TestIndexDatastoreKey_ToDS(t *testing.T) {
- key := NewIndexDataStoreKey(1, 2, []IndexedField{{Value: 5}})
+ key := NewIndexDataStoreKey(1, 2, []IndexedField{{Value: client.NewNormalInt(5)}})
assert.Equal(t, key.ToDS(), ds.NewKey(string(encodeKey(1, 2, 5, false))))
}
@@ -288,7 +288,7 @@ func TestDecodeIndexDataStoreKey(t *testing.T) {
Fields: []client.IndexedFieldDescription{{}},
},
inputBytes: encodeKey(colID, indexID, 5, false),
- expectedFields: []IndexedField{{Value: int64(5)}},
+ expectedFields: []IndexedField{{Value: client.NewNormalInt(5)}},
},
{
name: "two fields (one descending)",
@@ -296,8 +296,11 @@ func TestDecodeIndexDataStoreKey(t *testing.T) {
ID: indexID,
Fields: []client.IndexedFieldDescription{{}, {Descending: true}},
},
- inputBytes: encodeKey(colID, indexID, 5, false, 7, true),
- expectedFields: []IndexedField{{Value: int64(5)}, {Value: int64(7), Descending: true}},
+ inputBytes: encodeKey(colID, indexID, 5, false, 7, true),
+ expectedFields: []IndexedField{
+ {Value: client.NewNormalInt(5)},
+ {Value: client.NewNormalInt(7), Descending: true},
+ },
},
{
name: "last encoded value without matching field description is docID",
@@ -305,9 +308,12 @@ func TestDecodeIndexDataStoreKey(t *testing.T) {
ID: indexID,
Fields: []client.IndexedFieldDescription{{}},
},
- inputBytes: encoding.EncodeStringAscending(append(encodeKey(1, indexID, 5, false), '/'), "docID"),
- expectedFields: []IndexedField{{Value: int64(5)}, {Value: "docID"}},
- fieldKinds: []client.FieldKind{client.FieldKind_NILLABLE_INT},
+ inputBytes: encoding.EncodeStringAscending(append(encodeKey(1, indexID, 5, false), '/'), "docID"),
+ expectedFields: []IndexedField{
+ {Value: client.NewNormalInt(5)},
+ {Value: client.NewNormalString("docID")},
+ },
+ fieldKinds: []client.FieldKind{client.FieldKind_NILLABLE_INT},
},
}
@@ -384,11 +390,6 @@ func TestDecodeIndexDataStoreKey_InvalidKey(t *testing.T) {
val: encodeKey(colID, indexID, 5, false, 7, false, 9, false),
numFields: 2,
},
- {
- name: "invalid docID value",
- val: encoding.EncodeUvarintAscending(append(encodeKey(colID, indexID, 5, false), '/'), 5),
- numFields: 1,
- },
}
indexDesc := client.IndexDescription{ID: indexID, Fields: []client.IndexedFieldDescription{{}}}
for _, c := range cases {
diff --git a/core/parser.go b/core/parser.go
index 05a90d0526..619f3fd1c2 100644
--- a/core/parser.go
+++ b/core/parser.go
@@ -51,6 +51,10 @@ type Parser interface {
NewFilterFromString(collectionType string, body string) (immutable.Option[request.Filter], error)
// ParseSDL parses an SDL string into a set of collection descriptions.
+ //
+ // The parsing should validate the syntax, but not whether what that syntax expresses
+ // is valid, i.e. we don't want the parser to make remote calls to verify that the
+ // policy description is valid (that is the caller's responsibility).
ParseSDL(ctx context.Context, schemaString string) ([]client.CollectionDefinition, error)
// Adds the given schema to this parser's model.
diff --git a/datastore/blockstore.go b/datastore/blockstore.go
index 8525f8410e..be25894a3d 100644
--- a/datastore/blockstore.go
+++ b/datastore/blockstore.go
@@ -64,7 +64,6 @@ func (bs *bstore) HashOnRead(enabled bool) {
// Get returns a block from the blockstore.
func (bs *bstore) Get(ctx context.Context, k cid.Cid) (blocks.Block, error) {
if !k.Defined() {
- log.Error(ctx, "Undefined CID in blockstore")
return nil, ipld.ErrNotFound{Cid: k}
}
bdata, err := bs.store.Get(ctx, dshelp.MultihashToDsKey(k.Hash()))
@@ -164,13 +163,13 @@ func (bs *bstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
return
}
if e.Error != nil {
- log.ErrorE(ctx, "Blockstore.AllKeysChan errored", e.Error)
+ log.ErrorContextE(ctx, "Blockstore.AllKeysChan errored", e.Error)
return
}
hash, err := dshelp.DsKeyToMultihash(ds.RawKey(e.Key))
if err != nil {
- log.ErrorE(ctx, "Error parsing key from binary", err)
+ log.ErrorContextE(ctx, "Error parsing key from binary", err)
continue
}
k := cid.NewCidV1(cid.Raw, hash)
diff --git a/datastore/store.go b/datastore/store.go
index 759eef01db..7f2764a65d 100644
--- a/datastore/store.go
+++ b/datastore/store.go
@@ -14,12 +14,13 @@ import (
blockstore "github.com/ipfs/boxo/blockstore"
ds "github.com/ipfs/go-datastore"
+ "github.com/sourcenetwork/corelog"
+
"github.com/sourcenetwork/defradb/datastore/iterable"
- "github.com/sourcenetwork/defradb/logging"
)
var (
- log = logging.MustNewLogger("store")
+ log = corelog.NewLogger("store")
)
// RootStore wraps Batching and TxnDatastore requiring datastore to support both batching and transactions.
diff --git a/db/backup.go b/db/backup.go
index d47b3534e1..1353376f34 100644
--- a/db/backup.go
+++ b/db/backup.go
@@ -19,10 +19,9 @@ import (
"github.com/sourcenetwork/defradb/client"
"github.com/sourcenetwork/defradb/client/request"
- "github.com/sourcenetwork/defradb/datastore"
)
-func (db *db) basicImport(ctx context.Context, txn datastore.Txn, filepath string) (err error) {
+func (db *db) basicImport(ctx context.Context, filepath string) (err error) {
f, err := os.Open(filepath)
if err != nil {
return NewErrOpenFile(err, filepath)
@@ -49,7 +48,7 @@ func (db *db) basicImport(ctx context.Context, txn datastore.Txn, filepath strin
return err
}
colName := t.(string)
- col, err := db.getCollectionByName(ctx, txn, colName)
+ col, err := db.getCollectionByName(ctx, colName)
if err != nil {
return NewErrFailedToGetCollection(colName, err)
}
@@ -72,7 +71,7 @@ func (db *db) basicImport(ctx context.Context, txn datastore.Txn, filepath strin
// check if self referencing and remove from docMap for key creation
resetMap := map[string]any{}
for _, field := range col.Schema().Fields {
- if field.Kind == client.FieldKind_FOREIGN_OBJECT {
+ if field.Kind.IsObject() && !field.Kind.IsArray() {
if val, ok := docMap[field.Name+request.RelatedObjectID]; ok {
if docMap[request.NewDocIDFieldName] == val {
resetMap[field.Name+request.RelatedObjectID] = val
@@ -85,12 +84,12 @@ func (db *db) basicImport(ctx context.Context, txn datastore.Txn, filepath strin
delete(docMap, request.DocIDFieldName)
delete(docMap, request.NewDocIDFieldName)
- doc, err := client.NewDocFromMap(docMap, col.Schema())
+ doc, err := client.NewDocFromMap(docMap, col.Definition())
if err != nil {
return NewErrDocFromMap(err)
}
- err = col.WithTxn(txn).Create(ctx, doc)
+ err = col.Create(ctx, doc)
if err != nil {
return NewErrDocCreate(err)
}
@@ -101,7 +100,7 @@ func (db *db) basicImport(ctx context.Context, txn datastore.Txn, filepath strin
if err != nil {
return NewErrDocUpdate(err)
}
- err = col.WithTxn(txn).Update(ctx, doc)
+ err = col.Update(ctx, doc)
if err != nil {
return NewErrDocUpdate(err)
}
@@ -116,19 +115,19 @@ func (db *db) basicImport(ctx context.Context, txn datastore.Txn, filepath strin
return nil
}
-func (db *db) basicExport(ctx context.Context, txn datastore.Txn, config *client.BackupConfig) (err error) {
+func (db *db) basicExport(ctx context.Context, config *client.BackupConfig) (err error) {
// old key -> new Key
keyChangeCache := map[string]string{}
cols := []client.Collection{}
if len(config.Collections) == 0 {
- cols, err = db.getCollections(ctx, txn, client.CollectionFetchOptions{})
+ cols, err = db.getCollections(ctx, client.CollectionFetchOptions{})
if err != nil {
return NewErrFailedToGetAllCollections(err)
}
} else {
for _, colName := range config.Collections {
- col, err := db.getCollectionByName(ctx, txn, colName)
+ col, err := db.getCollectionByName(ctx, colName)
if err != nil {
return NewErrFailedToGetCollection(colName, err)
}
@@ -188,8 +187,7 @@ func (db *db) basicExport(ctx context.Context, txn datastore.Txn, config *client
if err != nil {
return err
}
- colTxn := col.WithTxn(txn)
- docIDsCh, err := colTxn.GetAllDocIDs(ctx)
+ docIDsCh, err := col.GetAllDocIDs(ctx)
if err != nil {
return err
}
@@ -205,7 +203,7 @@ func (db *db) basicExport(ctx context.Context, txn datastore.Txn, config *client
return err
}
}
- doc, err := colTxn.Get(ctx, docResultWithID.ID, false)
+ doc, err := col.Get(ctx, docResultWithID.ID, false)
if err != nil {
return err
}
@@ -214,9 +212,8 @@ func (db *db) basicExport(ctx context.Context, txn datastore.Txn, config *client
refFieldName := ""
// replace any foreign key if it needs to be changed
for _, field := range col.Schema().Fields {
- switch field.Kind {
- case client.FieldKind_FOREIGN_OBJECT:
- if _, ok := colNameCache[field.Schema]; !ok {
+ if field.Kind.IsObject() && !field.Kind.IsArray() {
+ if _, ok := colNameCache[field.Kind.Underlying()]; !ok {
continue
}
if foreignKey, err := doc.Get(field.Name + request.RelatedObjectID); err == nil {
@@ -230,9 +227,9 @@ func (db *db) basicExport(ctx context.Context, txn datastore.Txn, config *client
refFieldName = field.Name + request.RelatedObjectID
}
} else {
- foreignCol, err := db.getCollectionByName(ctx, txn, field.Schema)
+ foreignCol, err := db.getCollectionByName(ctx, field.Kind.Underlying())
if err != nil {
- return NewErrFailedToGetCollection(field.Schema, err)
+ return NewErrFailedToGetCollection(field.Kind.Underlying(), err)
}
foreignDocID, err := client.NewDocIDFromString(foreignKey.(string))
if err != nil {
@@ -260,7 +257,7 @@ func (db *db) basicExport(ctx context.Context, txn datastore.Txn, config *client
refFieldName = field.Name + request.RelatedObjectID
}
- newForeignDoc, err := client.NewDocFromMap(oldForeignDoc, foreignCol.Schema())
+ newForeignDoc, err := client.NewDocFromMap(oldForeignDoc, foreignCol.Definition())
if err != nil {
return err
}
@@ -291,7 +288,7 @@ func (db *db) basicExport(ctx context.Context, txn datastore.Txn, config *client
delete(docM, refFieldName)
}
- newDoc, err := client.NewDocFromMap(docM, col.Schema())
+ newDoc, err := client.NewDocFromMap(docM, col.Definition())
if err != nil {
return err
}
diff --git a/db/backup_test.go b/db/backup_test.go
index 093b1a1a3f..486080db81 100644
--- a/db/backup_test.go
+++ b/db/backup_test.go
@@ -18,6 +18,7 @@ import (
"github.com/stretchr/testify/require"
+ acpIdentity "github.com/sourcenetwork/defradb/acp/identity"
"github.com/sourcenetwork/defradb/client"
)
@@ -40,10 +41,10 @@ func TestBasicExport_WithNormalFormatting_NoError(t *testing.T) {
col1, err := db.GetCollectionByName(ctx, "User")
require.NoError(t, err)
- doc1, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`), col1.Schema())
+ doc1, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`), col1.Definition())
require.NoError(t, err)
- doc2, err := client.NewDocFromJSON([]byte(`{"name": "Bob", "age": 40}`), col1.Schema())
+ doc2, err := client.NewDocFromJSON([]byte(`{"name": "Bob", "age": 40}`), col1.Definition())
require.NoError(t, err)
err = col1.Create(ctx, doc1)
@@ -55,7 +56,7 @@ func TestBasicExport_WithNormalFormatting_NoError(t *testing.T) {
col2, err := db.GetCollectionByName(ctx, "Address")
require.NoError(t, err)
- doc3, err := client.NewDocFromJSON([]byte(`{"street": "101 Maple St", "city": "Toronto"}`), col2.Schema())
+ doc3, err := client.NewDocFromJSON([]byte(`{"street": "101 Maple St", "city": "Toronto"}`), col2.Definition())
require.NoError(t, err)
err = col2.Create(ctx, doc3)
@@ -65,8 +66,11 @@ func TestBasicExport_WithNormalFormatting_NoError(t *testing.T) {
require.NoError(t, err)
defer txn.Discard(ctx)
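+ // The identity and transaction now travel on the context instead of
+ // being passed to basicExport explicitly.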
+ ctx = SetContextIdentity(ctx, acpIdentity.None)
+ ctx = SetContextTxn(ctx, txn)
+
filepath := t.TempDir() + "/test.json"
- err = db.basicExport(ctx, txn, &client.BackupConfig{Filepath: filepath})
+ err = db.basicExport(ctx, &client.BackupConfig{Filepath: filepath})
require.NoError(t, err)
b, err := os.ReadFile(filepath)
@@ -102,10 +106,10 @@ func TestBasicExport_WithPrettyFormatting_NoError(t *testing.T) {
col1, err := db.GetCollectionByName(ctx, "User")
require.NoError(t, err)
- doc1, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`), col1.Schema())
+ doc1, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`), col1.Definition())
require.NoError(t, err)
- doc2, err := client.NewDocFromJSON([]byte(`{"name": "Bob", "age": 40}`), col1.Schema())
+ doc2, err := client.NewDocFromJSON([]byte(`{"name": "Bob", "age": 40}`), col1.Definition())
require.NoError(t, err)
err = col1.Create(ctx, doc1)
@@ -117,7 +121,7 @@ func TestBasicExport_WithPrettyFormatting_NoError(t *testing.T) {
col2, err := db.GetCollectionByName(ctx, "Address")
require.NoError(t, err)
- doc3, err := client.NewDocFromJSON([]byte(`{"street": "101 Maple St", "city": "Toronto"}`), col2.Schema())
+ doc3, err := client.NewDocFromJSON([]byte(`{"street": "101 Maple St", "city": "Toronto"}`), col2.Definition())
require.NoError(t, err)
err = col2.Create(ctx, doc3)
@@ -127,8 +131,11 @@ func TestBasicExport_WithPrettyFormatting_NoError(t *testing.T) {
require.NoError(t, err)
defer txn.Discard(ctx)
+ ctx = SetContextIdentity(ctx, acpIdentity.None)
+ ctx = SetContextTxn(ctx, txn)
+
filepath := t.TempDir() + "/test.json"
- err = db.basicExport(ctx, txn, &client.BackupConfig{Filepath: filepath, Pretty: true})
+ err = db.basicExport(ctx, &client.BackupConfig{Filepath: filepath, Pretty: true})
require.NoError(t, err)
b, err := os.ReadFile(filepath)
@@ -164,10 +171,10 @@ func TestBasicExport_WithSingleCollection_NoError(t *testing.T) {
col1, err := db.GetCollectionByName(ctx, "User")
require.NoError(t, err)
- doc1, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`), col1.Schema())
+ doc1, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`), col1.Definition())
require.NoError(t, err)
- doc2, err := client.NewDocFromJSON([]byte(`{"name": "Bob", "age": 40}`), col1.Schema())
+ doc2, err := client.NewDocFromJSON([]byte(`{"name": "Bob", "age": 40}`), col1.Definition())
require.NoError(t, err)
err = col1.Create(ctx, doc1)
@@ -179,7 +186,7 @@ func TestBasicExport_WithSingleCollection_NoError(t *testing.T) {
col2, err := db.GetCollectionByName(ctx, "Address")
require.NoError(t, err)
- doc3, err := client.NewDocFromJSON([]byte(`{"street": "101 Maple St", "city": "Toronto"}`), col2.Schema())
+ doc3, err := client.NewDocFromJSON([]byte(`{"street": "101 Maple St", "city": "Toronto"}`), col2.Definition())
require.NoError(t, err)
err = col2.Create(ctx, doc3)
@@ -189,8 +196,11 @@ func TestBasicExport_WithSingleCollection_NoError(t *testing.T) {
require.NoError(t, err)
defer txn.Discard(ctx)
+ ctx = SetContextIdentity(ctx, acpIdentity.None)
+ ctx = SetContextTxn(ctx, txn)
+
filepath := t.TempDir() + "/test.json"
- err = db.basicExport(ctx, txn, &client.BackupConfig{Filepath: filepath, Collections: []string{"Address"}})
+ err = db.basicExport(ctx, &client.BackupConfig{Filepath: filepath, Collections: []string{"Address"}})
require.NoError(t, err)
b, err := os.ReadFile(filepath)
@@ -227,10 +237,10 @@ func TestBasicExport_WithMultipleCollectionsAndUpdate_NoError(t *testing.T) {
col1, err := db.GetCollectionByName(ctx, "User")
require.NoError(t, err)
- doc1, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`), col1.Schema())
+ doc1, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`), col1.Definition())
require.NoError(t, err)
- doc2, err := client.NewDocFromJSON([]byte(`{"name": "Bob", "age": 31}`), col1.Schema())
+ doc2, err := client.NewDocFromJSON([]byte(`{"name": "Bob", "age": 31}`), col1.Definition())
require.NoError(t, err)
err = col1.Create(ctx, doc1)
@@ -242,10 +252,10 @@ func TestBasicExport_WithMultipleCollectionsAndUpdate_NoError(t *testing.T) {
col2, err := db.GetCollectionByName(ctx, "Book")
require.NoError(t, err)
- doc3, err := client.NewDocFromJSON([]byte(`{"name": "John and the sourcerers' stone", "author": "bae-e933420a-988a-56f8-8952-6c245aebd519"}`), col2.Schema())
+ doc3, err := client.NewDocFromJSON([]byte(`{"name": "John and the sourcerers' stone", "author": "bae-e933420a-988a-56f8-8952-6c245aebd519"}`), col2.Definition())
require.NoError(t, err)
- doc4, err := client.NewDocFromJSON([]byte(`{"name": "Game of chains", "author": "bae-e933420a-988a-56f8-8952-6c245aebd519"}`), col2.Schema())
+ doc4, err := client.NewDocFromJSON([]byte(`{"name": "Game of chains", "author": "bae-e933420a-988a-56f8-8952-6c245aebd519"}`), col2.Definition())
require.NoError(t, err)
err = col2.Create(ctx, doc3)
@@ -263,8 +273,11 @@ func TestBasicExport_WithMultipleCollectionsAndUpdate_NoError(t *testing.T) {
require.NoError(t, err)
defer txn.Discard(ctx)
+ ctx = SetContextIdentity(ctx, acpIdentity.None)
+ ctx = SetContextTxn(ctx, txn)
+
filepath := t.TempDir() + "/test.json"
- err = db.basicExport(ctx, txn, &client.BackupConfig{Filepath: filepath})
+ err = db.basicExport(ctx, &client.BackupConfig{Filepath: filepath})
require.NoError(t, err)
b, err := os.ReadFile(filepath)
@@ -300,10 +313,10 @@ func TestBasicExport_EnsureFileOverwrite_NoError(t *testing.T) {
col1, err := db.GetCollectionByName(ctx, "User")
require.NoError(t, err)
- doc1, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`), col1.Schema())
+ doc1, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`), col1.Definition())
require.NoError(t, err)
- doc2, err := client.NewDocFromJSON([]byte(`{"name": "Bob", "age": 40}`), col1.Schema())
+ doc2, err := client.NewDocFromJSON([]byte(`{"name": "Bob", "age": 40}`), col1.Definition())
require.NoError(t, err)
err = col1.Create(ctx, doc1)
@@ -315,7 +328,7 @@ func TestBasicExport_EnsureFileOverwrite_NoError(t *testing.T) {
col2, err := db.GetCollectionByName(ctx, "Address")
require.NoError(t, err)
- doc3, err := client.NewDocFromJSON([]byte(`{"street": "101 Maple St", "city": "Toronto"}`), col2.Schema())
+ doc3, err := client.NewDocFromJSON([]byte(`{"street": "101 Maple St", "city": "Toronto"}`), col2.Definition())
require.NoError(t, err)
err = col2.Create(ctx, doc3)
@@ -325,6 +338,9 @@ func TestBasicExport_EnsureFileOverwrite_NoError(t *testing.T) {
require.NoError(t, err)
defer txn.Discard(ctx)
+ ctx = SetContextIdentity(ctx, acpIdentity.None)
+ ctx = SetContextTxn(ctx, txn)
+
filepath := t.TempDir() + "/test.json"
err = os.WriteFile(
@@ -334,7 +350,7 @@ func TestBasicExport_EnsureFileOverwrite_NoError(t *testing.T) {
)
require.NoError(t, err)
- err = db.basicExport(ctx, txn, &client.BackupConfig{Filepath: filepath, Collections: []string{"Address"}})
+ err = db.basicExport(ctx, &client.BackupConfig{Filepath: filepath, Collections: []string{"Address"}})
require.NoError(t, err)
b, err := os.ReadFile(filepath)
@@ -370,6 +386,9 @@ func TestBasicImport_WithMultipleCollectionsAndObjects_NoError(t *testing.T) {
txn, err := db.NewTxn(ctx, false)
require.NoError(t, err)
+ ctx = SetContextIdentity(ctx, acpIdentity.None)
+ ctx = SetContextTxn(ctx, txn)
+
filepath := t.TempDir() + "/test.json"
err = os.WriteFile(
@@ -379,7 +398,7 @@ func TestBasicImport_WithMultipleCollectionsAndObjects_NoError(t *testing.T) {
)
require.NoError(t, err)
- err = db.basicImport(ctx, txn, filepath)
+ err = db.basicImport(ctx, filepath)
require.NoError(t, err)
err = txn.Commit(ctx)
require.NoError(t, err)
@@ -387,7 +406,10 @@ func TestBasicImport_WithMultipleCollectionsAndObjects_NoError(t *testing.T) {
txn, err = db.NewTxn(ctx, true)
require.NoError(t, err)
- col1, err := db.getCollectionByName(ctx, txn, "Address")
+ ctx = SetContextIdentity(ctx, acpIdentity.None)
+ ctx = SetContextTxn(ctx, txn)
+
+ col1, err := db.getCollectionByName(ctx, "Address")
require.NoError(t, err)
key1, err := client.NewDocIDFromString("bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f")
@@ -395,7 +417,7 @@ func TestBasicImport_WithMultipleCollectionsAndObjects_NoError(t *testing.T) {
_, err = col1.Get(ctx, key1, false)
require.NoError(t, err)
- col2, err := db.getCollectionByName(ctx, txn, "User")
+ col2, err := db.getCollectionByName(ctx, "User")
require.NoError(t, err)
key2, err := client.NewDocIDFromString("bae-b94880d1-e6d2-542f-b9e0-5a369fafd0df")
@@ -428,6 +450,7 @@ func TestBasicImport_WithJSONArray_ReturnError(t *testing.T) {
txn, err := db.NewTxn(ctx, false)
require.NoError(t, err)
+ ctx = SetContextTxn(ctx, txn)
filepath := t.TempDir() + "/test.json"
@@ -438,7 +461,7 @@ func TestBasicImport_WithJSONArray_ReturnError(t *testing.T) {
)
require.NoError(t, err)
- err = db.basicImport(ctx, txn, filepath)
+ err = db.basicImport(ctx, filepath)
require.ErrorIs(t, err, ErrExpectedJSONObject)
err = txn.Commit(ctx)
require.NoError(t, err)
@@ -463,6 +486,7 @@ func TestBasicImport_WithObjectCollection_ReturnError(t *testing.T) {
txn, err := db.NewTxn(ctx, false)
require.NoError(t, err)
+ ctx = SetContextTxn(ctx, txn)
filepath := t.TempDir() + "/test.json"
@@ -473,7 +497,7 @@ func TestBasicImport_WithObjectCollection_ReturnError(t *testing.T) {
)
require.NoError(t, err)
- err = db.basicImport(ctx, txn, filepath)
+ err = db.basicImport(ctx, filepath)
require.ErrorIs(t, err, ErrExpectedJSONArray)
err = txn.Commit(ctx)
require.NoError(t, err)
@@ -498,6 +522,7 @@ func TestBasicImport_WithInvalidFilepath_ReturnError(t *testing.T) {
txn, err := db.NewTxn(ctx, false)
require.NoError(t, err)
+ ctx = SetContextTxn(ctx, txn)
filepath := t.TempDir() + "/test.json"
@@ -509,7 +534,7 @@ func TestBasicImport_WithInvalidFilepath_ReturnError(t *testing.T) {
require.NoError(t, err)
wrongFilepath := t.TempDir() + "/some/test.json"
- err = db.basicImport(ctx, txn, wrongFilepath)
+ err = db.basicImport(ctx, wrongFilepath)
require.ErrorIs(t, err, os.ErrNotExist)
err = txn.Commit(ctx)
require.NoError(t, err)
@@ -534,6 +559,7 @@ func TestBasicImport_WithInvalidCollection_ReturnError(t *testing.T) {
txn, err := db.NewTxn(ctx, false)
require.NoError(t, err)
+ ctx = SetContextTxn(ctx, txn)
filepath := t.TempDir() + "/test.json"
@@ -544,7 +570,7 @@ func TestBasicImport_WithInvalidCollection_ReturnError(t *testing.T) {
)
require.NoError(t, err)
- err = db.basicImport(ctx, txn, filepath)
+ err = db.basicImport(ctx, filepath)
require.ErrorIs(t, err, ErrFailedToGetCollection)
err = txn.Commit(ctx)
require.NoError(t, err)
diff --git a/db/base/collection_keys.go b/db/base/collection_keys.go
index 1277b96a81..98584454ab 100644
--- a/db/base/collection_keys.go
+++ b/db/base/collection_keys.go
@@ -47,7 +47,7 @@ func MakePrimaryIndexKeyForCRDT(
WithInstanceInfo(key).
WithFieldId(core.COMPOSITE_NAMESPACE),
nil
- case client.LWW_REGISTER, client.PN_COUNTER:
+ case client.LWW_REGISTER, client.PN_COUNTER, client.P_COUNTER:
field, ok := c.GetFieldByName(fieldName)
if !ok {
return core.DataStoreKey{}, client.NewErrFieldNotExist(fieldName)
diff --git a/db/collection.go b/db/collection.go
index c9d311f01a..e84530d3e7 100644
--- a/db/collection.go
+++ b/db/collection.go
@@ -13,10 +13,13 @@ package db
import (
"bytes"
"context"
+ "encoding/json"
"fmt"
+ "reflect"
"strconv"
"strings"
+ jsonpatch "github.com/evanphx/json-patch/v5"
"github.com/ipfs/go-cid"
ds "github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/query"
@@ -24,10 +27,10 @@ import (
"github.com/lens-vm/lens/host-go/config/model"
"github.com/sourcenetwork/immutable"
+ "github.com/sourcenetwork/defradb/acp"
"github.com/sourcenetwork/defradb/client"
"github.com/sourcenetwork/defradb/client/request"
"github.com/sourcenetwork/defradb/core"
- "github.com/sourcenetwork/defradb/datastore"
"github.com/sourcenetwork/defradb/db/base"
"github.com/sourcenetwork/defradb/db/description"
"github.com/sourcenetwork/defradb/db/fetcher"
@@ -42,18 +45,8 @@ var _ client.Collection = (*collection)(nil)
// collection stores data records at Documents, which are gathered
// together under a collection name. This is analogous to SQL Tables.
type collection struct {
- db *db
-
- // txn represents any externally provided [datastore.Txn] for which any
- // operation on this [collection] instance should be scoped to.
- //
- // If this has no value, operations requiring a transaction should use an
- // implicit internally managed transaction, which only lives for duration
- // of the operation in question.
- txn immutable.Option[datastore.Txn]
-
- def client.CollectionDefinition
-
+ db *db
+ def client.CollectionDefinition
indexes []CollectionIndex
fetcherFactory func() fetcher.Fetcher
}
@@ -91,11 +84,12 @@ func (c *collection) newFetcher() fetcher.Fetcher {
// Note: Collection.ID is an auto-incrementing value that is generated by the database.
func (db *db) createCollection(
ctx context.Context,
- txn datastore.Txn,
def client.CollectionDefinition,
+ newDefinitions []client.CollectionDefinition,
) (client.Collection, error) {
schema := def.Schema
desc := def.Description
+ txn := mustGetContextTxn(ctx)
if desc.Name.HasValue() {
exists, err := description.HasCollectionByName(ctx, txn, desc.Name.Value())
@@ -107,16 +101,46 @@ func (db *db) createCollection(
}
}
- colSeq, err := db.getSequence(ctx, txn, core.CollectionIDSequenceKey{})
+ existingDefinitions, err := db.getAllActiveDefinitions(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ schemaByName := map[string]client.SchemaDescription{}
+ for _, existingDefinition := range existingDefinitions {
+ schemaByName[existingDefinition.Schema.Name] = existingDefinition.Schema
+ }
+ for _, newDefinition := range newDefinitions {
+ schemaByName[newDefinition.Schema.Name] = newDefinition.Schema
+ }
+
+ _, err = validateUpdateSchemaFields(schemaByName, client.SchemaDescription{}, schema)
+ if err != nil {
+ return nil, err
+ }
+
+ definitionsByName := map[string]client.CollectionDefinition{}
+ for _, existingDefinition := range existingDefinitions {
+ definitionsByName[existingDefinition.GetName()] = existingDefinition
+ }
+ for _, newDefinition := range newDefinitions {
+ definitionsByName[newDefinition.GetName()] = newDefinition
+ }
+ err = db.validateNewCollection(def, definitionsByName)
+ if err != nil {
+ return nil, err
+ }
+
+ colSeq, err := db.getSequence(ctx, core.CollectionIDSequenceKey{})
if err != nil {
return nil, err
}
- colID, err := colSeq.next(ctx, txn)
+ colID, err := colSeq.next(ctx)
if err != nil {
return nil, err
}
- fieldSeq, err := db.getSequence(ctx, txn, core.NewFieldIDSequenceKey(uint32(colID)))
+ fieldSeq, err := db.getSequence(ctx, core.NewFieldIDSequenceKey(uint32(colID)))
if err != nil {
return nil, err
}
@@ -129,27 +153,26 @@ func (db *db) createCollection(
return nil, err
}
desc.SchemaVersionID = schema.VersionID
- for _, globalField := range schema.Fields {
+ for _, localField := range desc.Fields {
var fieldID uint64
- if globalField.Name == request.DocIDFieldName {
+ if localField.Name == request.DocIDFieldName {
// There is no hard technical requirement for this, we just think it looks nicer
// if the doc id is at the zero index. It makes it look a little nicer in commit
// queries too.
fieldID = 0
} else {
- fieldID, err = fieldSeq.next(ctx, txn)
+ fieldID, err = fieldSeq.next(ctx)
if err != nil {
return nil, err
}
}
- desc.Fields = append(
- desc.Fields,
- client.CollectionFieldDescription{
- Name: globalField.Name,
- ID: client.FieldID(fieldID),
- },
- )
+ for i := range desc.Fields {
+ if desc.Fields[i].Name == localField.Name {
+ desc.Fields[i].ID = client.FieldID(fieldID)
+ break
+ }
+ }
}
desc, err = description.SaveCollection(ctx, txn, desc)
@@ -158,13 +181,45 @@ func (db *db) createCollection(
}
col := db.newCollection(desc, schema)
+
for _, index := range desc.Indexes {
- if _, err := col.createIndex(ctx, txn, index); err != nil {
+ if _, err := col.createIndex(ctx, index); err != nil {
return nil, err
}
}
- return db.getCollectionByID(ctx, txn, desc.ID)
+ return db.getCollectionByID(ctx, desc.ID)
+}
+
+// validateCollectionDefinitionPolicyDesc validates that the policy definition is valid, beyond syntax.
+//
+// Ensures that the information within the policy definition makes sense;
+// this function might also make relevant remote calls using the acp system.
+func (db *db) validateCollectionDefinitionPolicyDesc(
+ ctx context.Context,
+ policyDesc immutable.Option[client.PolicyDescription],
+) error {
+ if !policyDesc.HasValue() {
+ // No policy validation needed; whether acp exists or not doesn't matter.
+ return nil
+ }
+
+ // If there is a policy specified but the database does not have
+ // acp enabled/available, return an error; the database must have acp available
+ // to enable access control (in order to adhere to the specified policy).
+ if !db.acp.HasValue() {
+ return ErrCanNotHavePolicyWithoutACP
+ }
+
+ // If a policy is specified on the collection and acp is available/enabled,
+ // then we use the acp system to ensure that the specified policy id actually
+ // exists as a policy, that the resource name exists on that policy,
+ // and that the resource is a valid DPI.
+ return db.acp.Value().ValidateResourceExistsOnValidDPI(
+ ctx,
+ policyDesc.Value().ID,
+ policyDesc.Value().ResourceName,
+ )
}
// updateSchema updates the persisted schema description matching the name of the given
@@ -177,7 +232,6 @@ func (db *db) createCollection(
// applied.
func (db *db) updateSchema(
ctx context.Context,
- txn datastore.Txn,
existingSchemaByName map[string]client.SchemaDescription,
proposedDescriptionsByName map[string]client.SchemaDescription,
schema client.SchemaDescription,
@@ -198,13 +252,12 @@ func (db *db) updateSchema(
}
for _, field := range schema.Fields {
- if field.Kind == client.FieldKind_FOREIGN_OBJECT {
+ if field.Kind.IsObject() && !field.Kind.IsArray() {
idFieldName := field.Name + "_id"
if _, ok := schema.GetFieldByName(idFieldName); !ok {
schema.Fields = append(schema.Fields, client.SchemaFieldDescription{
- Name: idFieldName,
- Kind: client.FieldKind_DocID,
- RelationName: field.RelationName,
+ Name: idFieldName,
+ Kind: client.FieldKind_DocID,
})
}
}
@@ -218,6 +271,7 @@ func (db *db) updateSchema(
}
}
+ txn := mustGetContextTxn(ctx)
previousVersionID := schema.VersionID
schema, err = description.CreateSchemaVersion(ctx, txn, schema)
if err != nil {
@@ -233,7 +287,7 @@ func (db *db) updateSchema(
return err
}
- colSeq, err := db.getSequence(ctx, txn, core.CollectionIDSequenceKey{})
+ colSeq, err := db.getSequence(ctx, core.CollectionIDSequenceKey{})
if err != nil {
return err
}
@@ -263,7 +317,7 @@ func (db *db) updateSchema(
existingCol.RootID = col.RootID
}
- fieldSeq, err := db.getSequence(ctx, txn, core.NewFieldIDSequenceKey(existingCol.RootID))
+ fieldSeq, err := db.getSequence(ctx, core.NewFieldIDSequenceKey(existingCol.RootID))
if err != nil {
return err
}
@@ -276,7 +330,7 @@ func (db *db) updateSchema(
if ok {
fieldID = existingField.ID
} else {
- nextFieldID, err := fieldSeq.next(ctx, txn)
+ nextFieldID, err := fieldSeq.next(ctx)
if err != nil {
return err
}
@@ -302,12 +356,12 @@ func (db *db) updateSchema(
}
if !isExistingCol {
- colID, err := colSeq.next(ctx, txn)
+ colID, err := colSeq.next(ctx)
if err != nil {
return err
}
- fieldSeq, err := db.getSequence(ctx, txn, core.NewFieldIDSequenceKey(col.RootID))
+ fieldSeq, err := db.getSequence(ctx, core.NewFieldIDSequenceKey(col.RootID))
if err != nil {
return err
}
@@ -327,7 +381,7 @@ func (db *db) updateSchema(
for _, globalField := range schema.Fields {
_, exists := col.GetFieldByName(globalField.Name)
if !exists {
- fieldID, err := fieldSeq.next(ctx, txn)
+ fieldID, err := fieldSeq.next(ctx)
if err != nil {
return err
}
@@ -359,7 +413,7 @@ func (db *db) updateSchema(
if setAsActiveVersion {
// activate collection versions using the new schema ID. This call must be made after
// all new collection versions have been saved.
- err = db.setActiveSchemaVersion(ctx, txn, schema.VersionID)
+ err = db.setActiveSchemaVersion(ctx, schema.VersionID)
if err != nil {
return err
}
@@ -433,65 +487,26 @@ func validateUpdateSchemaFields(
// If the field is new, then the collection has changed
hasChanged = hasChanged || !fieldAlreadyExists
- if !fieldAlreadyExists && (proposedField.Kind == client.FieldKind_FOREIGN_OBJECT ||
- proposedField.Kind == client.FieldKind_FOREIGN_OBJECT_ARRAY) {
- if proposedField.Schema == "" {
- return false, NewErrRelationalFieldMissingSchema(proposedField.Name, proposedField.Kind)
- }
-
- relatedDesc, relatedDescFound := descriptionsByName[proposedField.Schema]
+ if !fieldAlreadyExists && proposedField.Kind.IsObject() {
+ _, relatedDescFound := descriptionsByName[proposedField.Kind.Underlying()]
if !relatedDescFound {
- return false, NewErrSchemaNotFound(proposedField.Name, proposedField.Schema)
+ return false, NewErrFieldKindNotFound(proposedField.Name, proposedField.Kind.Underlying())
}
- if proposedField.RelationName == "" {
- return false, NewErrRelationalFieldMissingRelationName(proposedField.Name)
- }
-
- if proposedField.IsPrimaryRelation {
- if proposedField.Kind == client.FieldKind_FOREIGN_OBJECT_ARRAY {
- return false, NewErrPrimarySideOnMany(proposedField.Name)
- }
- }
-
- if proposedField.Kind == client.FieldKind_FOREIGN_OBJECT {
+ if proposedField.Kind.IsObject() && !proposedField.Kind.IsArray() {
idFieldName := proposedField.Name + request.RelatedObjectID
idField, idFieldFound := proposedDesc.GetFieldByName(idFieldName)
if idFieldFound {
if idField.Kind != client.FieldKind_DocID {
return false, NewErrRelationalFieldIDInvalidType(idField.Name, client.FieldKind_DocID, idField.Kind)
}
-
- if idField.RelationName == "" {
- return false, NewErrRelationalFieldMissingRelationName(idField.Name)
- }
}
}
+ }
- var relatedFieldFound bool
- var relatedField client.SchemaFieldDescription
- for _, field := range relatedDesc.Fields {
- if field.RelationName == proposedField.RelationName &&
- field.Kind != client.FieldKind_DocID &&
- !(relatedDesc.Name == proposedDesc.Name && field.Name == proposedField.Name) {
- relatedFieldFound = true
- relatedField = field
- break
- }
- }
-
- if !relatedFieldFound {
- return false, client.NewErrRelationOneSided(proposedField.Name, proposedField.Schema)
- }
-
- if !(proposedField.IsPrimaryRelation || relatedField.IsPrimaryRelation) {
- return false, NewErrPrimarySideNotDefined(proposedField.RelationName)
- }
-
- if proposedField.IsPrimaryRelation && relatedField.IsPrimaryRelation {
- return false, NewErrBothSidesPrimary(proposedField.RelationName)
- }
+ if proposedField.Kind.IsObjectArray() {
+ return false, NewErrSecondaryFieldOnSchema(proposedField.Name)
}
if _, isDuplicate := newFieldNames[proposedField.Name]; isDuplicate {
@@ -526,6 +541,513 @@ func validateUpdateSchemaFields(
return hasChanged, nil
}
+func (db *db) patchCollection(
+ ctx context.Context,
+ patchString string,
+) error {
+ patch, err := jsonpatch.DecodePatch([]byte(patchString))
+ if err != nil {
+ return err
+ }
+ txn := mustGetContextTxn(ctx)
+ cols, err := description.GetCollections(ctx, txn)
+ if err != nil {
+ return err
+ }
+
+ existingColsByID := map[uint32]client.CollectionDescription{}
+ for _, col := range cols {
+ existingColsByID[col.ID] = col
+ }
+
+ existingDescriptionJson, err := json.Marshal(existingColsByID)
+ if err != nil {
+ return err
+ }
+
+ newDescriptionJson, err := patch.Apply(existingDescriptionJson)
+ if err != nil {
+ return err
+ }
+
+ var newColsByID map[uint32]client.CollectionDescription
+ decoder := json.NewDecoder(strings.NewReader(string(newDescriptionJson)))
+ decoder.DisallowUnknownFields()
+ err = decoder.Decode(&newColsByID)
+ if err != nil {
+ return err
+ }
+
+ err = db.validateCollectionChanges(existingColsByID, newColsByID)
+ if err != nil {
+ return err
+ }
+
+ for _, col := range newColsByID {
+ _, err := description.SaveCollection(ctx, txn, col)
+ if err != nil {
+ return err
+ }
+
+ existingCol, ok := existingColsByID[col.ID]
+ if ok {
+ // Clear any existing migrations in the registry, using this semi-hacky way
+ // to avoid adding more functions to a public interface that we wish to remove.
+
+ for _, src := range existingCol.CollectionSources() {
+ if src.Transform.HasValue() {
+ err = db.LensRegistry().SetMigration(ctx, existingCol.ID, model.Lens{})
+ if err != nil {
+ return err
+ }
+ }
+ }
+ for _, src := range existingCol.QuerySources() {
+ if src.Transform.HasValue() {
+ err = db.LensRegistry().SetMigration(ctx, existingCol.ID, model.Lens{})
+ if err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ for _, src := range col.CollectionSources() {
+ if src.Transform.HasValue() {
+ err = db.LensRegistry().SetMigration(ctx, col.ID, src.Transform.Value())
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ for _, src := range col.QuerySources() {
+ if src.Transform.HasValue() {
+ err = db.LensRegistry().SetMigration(ctx, col.ID, src.Transform.Value())
+ if err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ return db.loadSchema(ctx)
+}
+
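
The decode/apply/strict-decode flow above can be exercised in isolation. Below is a minimal, self-contained sketch assuming the evanphx/json-patch package (the `jsonpatch` import used here) and a stand-in struct in place of client.CollectionDescription:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

// description is a stand-in for client.CollectionDescription.
type description struct {
	Name string `json:"Name"`
}

func main() {
	existing := map[uint32]description{1: {Name: "Users"}}
	existingJSON, err := json.Marshal(existing)
	if err != nil {
		panic(err)
	}

	// Patch paths address collections by their map key (the collection ID).
	patch, err := jsonpatch.DecodePatch([]byte(`[{"op":"replace","path":"/1/Name","value":"People"}]`))
	if err != nil {
		panic(err)
	}
	newJSON, err := patch.Apply(existingJSON)
	if err != nil {
		panic(err)
	}

	// DisallowUnknownFields mirrors the strict decode above: a patch that
	// introduces an unrecognized field fails instead of being silently dropped.
	var updated map[uint32]description
	dec := json.NewDecoder(bytes.NewReader(newJSON))
	dec.DisallowUnknownFields()
	if err := dec.Decode(&updated); err != nil {
		panic(err)
	}
	fmt.Println(updated[1].Name) // People
}
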
+var patchCollectionValidators = []func(
+ map[uint32]client.CollectionDescription,
+ map[uint32]client.CollectionDescription,
+) error{
+ validateCollectionNameUnique,
+ validateSingleVersionActive,
+ validateSourcesNotRedefined,
+ validateIndexesNotModified,
+ validateFieldsNotModified,
+ validatePolicyNotModified,
+ validateIDNotZero,
+ validateIDUnique,
+ validateIDExists,
+ validateRootIDNotMutated,
+ validateSchemaVersionIDNotMutated,
+ validateCollectionNotRemoved,
+}
+
+func (db *db) validateCollectionChanges(
+ oldColsByID map[uint32]client.CollectionDescription,
+ newColsByID map[uint32]client.CollectionDescription,
+) error {
+ for _, validator := range patchCollectionValidators {
+ err := validator(oldColsByID, newColsByID)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
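
Both validator tables (patchCollectionValidators above and newCollectionValidators below) share the same fail-fast pipeline: each rule receives the full old and new state, and the first error aborts the operation. The pattern in isolation, with a stand-in state type:

// colState is a stand-in for map[uint32]client.CollectionDescription.
type colState = map[uint32]string

type rule func(oldCols, newCols colState) error

// runRules applies each rule in order and fails fast on the first
// violation, mirroring validateCollectionChanges.
func runRules(rules []rule, oldCols, newCols colState) error {
	for _, r := range rules {
		if err := r(oldCols, newCols); err != nil {
			return err
		}
	}
	return nil
}
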
+var newCollectionValidators = []func(
+ client.CollectionDefinition,
+ map[string]client.CollectionDefinition,
+) error{
+ validateSecondaryFieldsPairUp,
+ validateRelationPointsToValidKind,
+ validateSingleSidePrimary,
+}
+
+func (db *db) validateNewCollection(
+ def client.CollectionDefinition,
+ defsByName map[string]client.CollectionDefinition,
+) error {
+ for _, validator := range newCollectionValidators {
+ err := validator(def, defsByName)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func validateRelationPointsToValidKind(
+ def client.CollectionDefinition,
+ defsByName map[string]client.CollectionDefinition,
+) error {
+ for _, field := range def.Description.Fields {
+ if !field.Kind.HasValue() {
+ continue
+ }
+
+ if !field.Kind.Value().IsObject() {
+ continue
+ }
+
+ underlying := field.Kind.Value().Underlying()
+ _, ok := defsByName[underlying]
+ if !ok {
+ return NewErrFieldKindNotFound(field.Name, underlying)
+ }
+ }
+
+ return nil
+}
+
+func validateSecondaryFieldsPairUp(
+ def client.CollectionDefinition,
+ defsByName map[string]client.CollectionDefinition,
+) error {
+ for _, field := range def.Description.Fields {
+ if !field.Kind.HasValue() {
+ continue
+ }
+
+ if !field.Kind.Value().IsObject() {
+ continue
+ }
+
+ if !field.RelationName.HasValue() {
+ continue
+ }
+
+ _, hasSchemaField := def.Schema.GetFieldByName(field.Name)
+ if hasSchemaField {
+ continue
+ }
+
+ underlying := field.Kind.Value().Underlying()
+ otherDef, ok := defsByName[underlying]
+ if !ok {
+ continue
+ }
+
+ if len(otherDef.Description.Fields) == 0 {
+ // Views/embedded objects do not require both sides of the relation to be defined.
+ continue
+ }
+
+ otherField, ok := otherDef.Description.GetFieldByRelation(
+ field.RelationName.Value(),
+ def.GetName(),
+ field.Name,
+ )
+ if !ok {
+ return NewErrRelationMissingField(underlying, field.RelationName.Value())
+ }
+
+ _, ok = otherDef.Schema.GetFieldByName(otherField.Name)
+ if !ok {
+ // This secondary is paired with another secondary, which is invalid
+ return NewErrRelationMissingField(underlying, field.RelationName.Value())
+ }
+ }
+
+ return nil
+}
+
+func validateSingleSidePrimary(
+ def client.CollectionDefinition,
+ defsByName map[string]client.CollectionDefinition,
+) error {
+ for _, field := range def.Description.Fields {
+ if !field.Kind.HasValue() {
+ continue
+ }
+
+ if !field.Kind.Value().IsObject() {
+ continue
+ }
+
+ if !field.RelationName.HasValue() {
+ continue
+ }
+
+ _, hasSchemaField := def.Schema.GetFieldByName(field.Name)
+ if !hasSchemaField {
+ // This is a secondary field and thus passes this rule
+ continue
+ }
+
+ underlying := field.Kind.Value().Underlying()
+ otherDef, ok := defsByName[underlying]
+ if !ok {
+ continue
+ }
+
+ otherField, ok := otherDef.Description.GetFieldByRelation(
+ field.RelationName.Value(),
+ def.GetName(),
+ field.Name,
+ )
+ if !ok {
+ // This must be a one-sided relation, in which case it passes this rule
+ continue
+ }
+
+ _, ok = otherDef.Schema.GetFieldByName(otherField.Name)
+ if ok {
+ // This primary is paired with another primary, which is invalid
+ return ErrMultipleRelationPrimaries
+ }
+ }
+
+ return nil
+}
+
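
validateSecondaryFieldsPairUp and validateSingleSidePrimary above are two halves of one rule: when both ends of a relation are defined (one-sided relations skip these checks), exactly one end may be primary, where "primary" means the field also exists on the schema. A toy model of that pairing rule, with a stand-in type:

// side models one end of a relation; onSchema mirrors the
// def.Schema.GetFieldByName check used by both validators.
type side struct{ onSchema bool }

func validatePair(a, b side) error {
	if a.onSchema && b.onSchema {
		// Both primary: cf. ErrMultipleRelationPrimaries.
		return fmt.Errorf("relation has two primary sides")
	}
	if !a.onSchema && !b.onSchema {
		// Both secondary: cf. NewErrRelationMissingField.
		return fmt.Errorf("relation has two secondary sides")
	}
	return nil
}
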
+func validateCollectionNameUnique(
+ oldColsByID map[uint32]client.CollectionDescription,
+ newColsByID map[uint32]client.CollectionDescription,
+) error {
+ names := map[string]struct{}{}
+ for _, col := range newColsByID {
+ if !col.Name.HasValue() {
+ continue
+ }
+
+ if _, ok := names[col.Name.Value()]; ok {
+ return NewErrCollectionAlreadyExists(col.Name.Value())
+ }
+ names[col.Name.Value()] = struct{}{}
+ }
+
+ return nil
+}
+
+func validateSingleVersionActive(
+ oldColsByID map[uint32]client.CollectionDescription,
+ newColsByID map[uint32]client.CollectionDescription,
+) error {
+ rootsWithActiveCol := map[uint32]struct{}{}
+ for _, col := range newColsByID {
+ if !col.Name.HasValue() {
+ continue
+ }
+
+ if _, ok := rootsWithActiveCol[col.RootID]; ok {
+ return NewErrMultipleActiveCollectionVersions(col.Name.Value(), col.RootID)
+ }
+ rootsWithActiveCol[col.RootID] = struct{}{}
+ }
+
+ return nil
+}
+
+// validateSourcesNotRedefined specifies the limitations on how the collection sources
+// can be mutated.
+//
+// Currently, new sources cannot be added, existing ones cannot be removed, and CollectionSources
+// cannot be redirected to other collections.
+func validateSourcesNotRedefined(
+ oldColsByID map[uint32]client.CollectionDescription,
+ newColsByID map[uint32]client.CollectionDescription,
+) error {
+ for _, newCol := range newColsByID {
+ oldCol, ok := oldColsByID[newCol.ID]
+ if !ok {
+ continue
+ }
+
+ newColSources := newCol.CollectionSources()
+ oldColSources := oldCol.CollectionSources()
+
+ if len(newColSources) != len(oldColSources) {
+ return NewErrCollectionSourcesCannotBeAddedRemoved(newCol.ID)
+ }
+
+ for i := range newColSources {
+ if newColSources[i].SourceCollectionID != oldColSources[i].SourceCollectionID {
+ return NewErrCollectionSourceIDMutated(
+ newCol.ID,
+ newColSources[i].SourceCollectionID,
+ oldColSources[i].SourceCollectionID,
+ )
+ }
+ }
+
+ newQuerySources := newCol.QuerySources()
+ oldQuerySources := oldCol.QuerySources()
+
+ if len(newQuerySources) != len(oldQuerySources) {
+ return NewErrCollectionSourcesCannotBeAddedRemoved(newCol.ID)
+ }
+ }
+
+ return nil
+}
+
+func validateIndexesNotModified(
+ oldColsByID map[uint32]client.CollectionDescription,
+ newColsByID map[uint32]client.CollectionDescription,
+) error {
+ for _, newCol := range newColsByID {
+ oldCol, ok := oldColsByID[newCol.ID]
+ if !ok {
+ continue
+ }
+
+ // DeepEqual is temporary, as this validation is temporary
+ if !reflect.DeepEqual(oldCol.Indexes, newCol.Indexes) {
+ return NewErrCollectionIndexesCannotBeMutated(newCol.ID)
+ }
+ }
+
+ return nil
+}
+
+func validateFieldsNotModified(
+ oldColsByID map[uint32]client.CollectionDescription,
+ newColsByID map[uint32]client.CollectionDescription,
+) error {
+ for _, newCol := range newColsByID {
+ oldCol, ok := oldColsByID[newCol.ID]
+ if !ok {
+ continue
+ }
+
+ // DeepEqual is temporary, as this validation is temporary
+ if !reflect.DeepEqual(oldCol.Fields, newCol.Fields) {
+ return NewErrCollectionFieldsCannotBeMutated(newCol.ID)
+ }
+ }
+
+ return nil
+}
+
+func validatePolicyNotModified(
+ oldColsByID map[uint32]client.CollectionDescription,
+ newColsByID map[uint32]client.CollectionDescription,
+) error {
+ for _, newCol := range newColsByID {
+ oldCol, ok := oldColsByID[newCol.ID]
+ if !ok {
+ continue
+ }
+
+ // DeepEqual is temporary, as this validation is temporary
+ if !reflect.DeepEqual(oldCol.Policy, newCol.Policy) {
+ return NewErrCollectionPolicyCannotBeMutated(newCol.ID)
+ }
+ }
+
+ return nil
+}
+
+func validateIDNotZero(
+ oldColsByID map[uint32]client.CollectionDescription,
+ newColsByID map[uint32]client.CollectionDescription,
+) error {
+ for _, newCol := range newColsByID {
+ if newCol.ID == 0 {
+ return ErrCollectionIDCannotBeZero
+ }
+ }
+
+ return nil
+}
+
+func validateIDUnique(
+ oldColsByID map[uint32]client.CollectionDescription,
+ newColsByID map[uint32]client.CollectionDescription,
+) error {
+ colIds := map[uint32]struct{}{}
+ for _, newCol := range newColsByID {
+ if _, ok := colIds[newCol.ID]; ok {
+ return NewErrCollectionIDAlreadyExists(newCol.ID)
+ }
+ colIds[newCol.ID] = struct{}{}
+ }
+
+ return nil
+}
+
+func validateIDExists(
+ oldColsByID map[uint32]client.CollectionDescription,
+ newColsByID map[uint32]client.CollectionDescription,
+) error {
+ for _, newCol := range newColsByID {
+ if _, ok := oldColsByID[newCol.ID]; !ok {
+ return NewErrAddCollectionIDWithPatch(newCol.ID)
+ }
+ }
+
+ return nil
+}
+
+func validateRootIDNotMutated(
+ oldColsByID map[uint32]client.CollectionDescription,
+ newColsByID map[uint32]client.CollectionDescription,
+) error {
+ for _, newCol := range newColsByID {
+ oldCol, ok := oldColsByID[newCol.ID]
+ if !ok {
+ continue
+ }
+
+ if newCol.RootID != oldCol.RootID {
+ return NewErrCollectionRootIDCannotBeMutated(newCol.ID)
+ }
+ }
+
+ return nil
+}
+
+func validateSchemaVersionIDNotMutated(
+ oldColsByID map[uint32]client.CollectionDescription,
+ newColsByID map[uint32]client.CollectionDescription,
+) error {
+ for _, newCol := range newColsByID {
+ oldCol, ok := oldColsByID[newCol.ID]
+ if !ok {
+ continue
+ }
+
+ if newCol.SchemaVersionID != oldCol.SchemaVersionID {
+ return NewErrCollectionSchemaVersionIDCannotBeMutated(newCol.ID)
+ }
+ }
+
+ return nil
+}
+
+func validateCollectionNotRemoved(
+ oldColsByID map[uint32]client.CollectionDescription,
+ newColsByID map[uint32]client.CollectionDescription,
+) error {
+oldLoop:
+ for _, oldCol := range oldColsByID {
+ for _, newCol := range newColsByID {
+ // It is not enough to just match by the map key, in case the key does not pair
+ // up with the ID (this can happen if a user moves the collection within the map).
+ if newCol.ID == oldCol.ID {
+ continue oldLoop
+ }
+ }
+
+ return NewErrCollectionsCannotBeDeleted(oldCol.ID)
+ }
+
+ return nil
+}
+
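
The labeled continue above is what allows matching by ID rather than by map key: as soon as an old ID is found anywhere in the new set, the outer loop advances to the next old entry. The same shape in isolation:

func allRetained(oldIDs, newIDs []uint32) error {
oldLoop:
	for _, oldID := range oldIDs {
		for _, newID := range newIDs {
			if newID == oldID {
				continue oldLoop // found; check the next old ID
			}
		}
		return fmt.Errorf("collection %d cannot be deleted", oldID)
	}
	return nil
}
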
// SetActiveSchemaVersion activates all collection versions with the given schema version, and deactivates all
// those without it (if they share the same schema root).
//
@@ -535,13 +1057,12 @@ func validateUpdateSchemaFields(
// It will return an error if the provided schema version ID does not exist.
func (db *db) setActiveSchemaVersion(
ctx context.Context,
- txn datastore.Txn,
schemaVersionID string,
) error {
if schemaVersionID == "" {
return ErrSchemaVersionIDEmpty
}
-
+ txn := mustGetContextTxn(ctx)
cols, err := description.GetCollectionsBySchemaVersionID(ctx, txn, schemaVersionID)
if err != nil {
return err
@@ -585,11 +1106,11 @@ func (db *db) setActiveSchemaVersion(
if len(sources) > 0 {
// For now, we assume that each collection can only have a single source. This will likely need
// to change later.
- activeCol, rootCol, isActiveFound = db.getActiveCollectionDown(ctx, txn, colsByID, sources[0].SourceCollectionID)
+ activeCol, rootCol, isActiveFound = db.getActiveCollectionDown(ctx, colsByID, sources[0].SourceCollectionID)
}
if !isActiveFound {
// We need to look both down and up for the active version - the most recent is not necessarily the active one.
- activeCol, isActiveFound = db.getActiveCollectionUp(ctx, txn, colsBySourceID, rootCol.ID)
+ activeCol, isActiveFound = db.getActiveCollectionUp(ctx, colsBySourceID, rootCol.ID)
}
var newName string
@@ -618,12 +1139,11 @@ func (db *db) setActiveSchemaVersion(
}
// Load the schema into the clients (e.g. GQL)
- return db.loadSchema(ctx, txn)
+ return db.loadSchema(ctx)
}
func (db *db) getActiveCollectionDown(
ctx context.Context,
- txn datastore.Txn,
colsByID map[uint32]client.CollectionDescription,
id uint32,
) (client.CollectionDescription, client.CollectionDescription, bool) {
@@ -646,12 +1166,11 @@ func (db *db) getActiveCollectionDown(
// For now, we assume that each collection can only have a single source. This will likely need
// to change later.
- return db.getActiveCollectionDown(ctx, txn, colsByID, sources[0].SourceCollectionID)
+ return db.getActiveCollectionDown(ctx, colsByID, sources[0].SourceCollectionID)
}
func (db *db) getActiveCollectionUp(
ctx context.Context,
- txn datastore.Txn,
colsBySourceID map[uint32][]client.CollectionDescription,
id uint32,
) (client.CollectionDescription, bool) {
@@ -665,7 +1184,7 @@ func (db *db) getActiveCollectionUp(
if col.Name.HasValue() {
return col, true
}
- activeCol, isFound := db.getActiveCollectionUp(ctx, txn, colsBySourceID, col.ID)
+ activeCol, isFound := db.getActiveCollectionUp(ctx, colsBySourceID, col.ID)
if isFound {
return activeCol, isFound
}
@@ -674,7 +1193,9 @@ func (db *db) getActiveCollectionUp(
return client.CollectionDescription{}, false
}
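
Together these two lookups implement a graph walk over collection sources: follow source links "down" toward ancestors first and, failing that, fan out "up" through dependents, returning the first version that is active (i.e. still has a name). A condensed model with stand-in types:

type ver struct {
	id     uint32
	source uint32 // 0 when this version has no source
	active bool   // mirrors col.Name.HasValue()
}

func activeDown(byID map[uint32]ver, id uint32) (ver, bool) {
	v, ok := byID[id]
	if !ok {
		return ver{}, false
	}
	if v.active {
		return v, true
	}
	if v.source == 0 {
		return ver{}, false
	}
	return activeDown(byID, v.source)
}

func activeUp(bySource map[uint32][]ver, id uint32) (ver, bool) {
	for _, v := range bySource[id] {
		if v.active {
			return v, true
		}
		if found, ok := activeUp(bySource, v.id); ok {
			return found, true
		}
	}
	return ver{}, false
}
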
-func (db *db) getCollectionByID(ctx context.Context, txn datastore.Txn, id uint32) (client.Collection, error) {
+func (db *db) getCollectionByID(ctx context.Context, id uint32) (client.Collection, error) {
+ txn := mustGetContextTxn(ctx)
+
col, err := description.GetCollectionByID(ctx, txn, id)
if err != nil {
return nil, err
@@ -686,7 +1207,8 @@ func (db *db) getCollectionByID(ctx context.Context, txn datastore.Txn, id uint3
}
collection := db.newCollection(col, schema)
- err = collection.loadIndexes(ctx, txn)
+
+ err = collection.loadIndexes(ctx)
if err != nil {
return nil, err
}
@@ -695,12 +1217,12 @@ func (db *db) getCollectionByID(ctx context.Context, txn datastore.Txn, id uint3
}
// getCollectionByName returns an existing collection within the database.
-func (db *db) getCollectionByName(ctx context.Context, txn datastore.Txn, name string) (client.Collection, error) {
+func (db *db) getCollectionByName(ctx context.Context, name string) (client.Collection, error) {
if name == "" {
return nil, ErrCollectionNameEmpty
}
- cols, err := db.getCollections(ctx, txn, client.CollectionFetchOptions{Name: immutable.Some(name)})
+ cols, err := db.getCollections(ctx, client.CollectionFetchOptions{Name: immutable.Some(name)})
if err != nil {
return nil, err
}
@@ -716,11 +1238,11 @@ func (db *db) getCollectionByName(ctx context.Context, txn datastore.Txn, name s
// is provided.
func (db *db) getCollections(
ctx context.Context,
- txn datastore.Txn,
options client.CollectionFetchOptions,
) ([]client.Collection, error) {
- var cols []client.CollectionDescription
+ txn := mustGetContextTxn(ctx)
+ var cols []client.CollectionDescription
switch {
case options.Name.HasValue():
col, err := description.GetCollectionByName(ctx, txn, options.Name.Value())
@@ -789,7 +1311,7 @@ func (db *db) getCollections(
collection := db.newCollection(col, schema)
collections = append(collections, collection)
- err = collection.loadIndexes(ctx, txn)
+ err = collection.loadIndexes(ctx)
if err != nil {
return nil, err
}
@@ -799,7 +1321,9 @@ func (db *db) getCollections(
}
// getAllActiveDefinitions returns all queryable collection/views and any embedded schema used by them.
-func (db *db) getAllActiveDefinitions(ctx context.Context, txn datastore.Txn) ([]client.CollectionDefinition, error) {
+func (db *db) getAllActiveDefinitions(ctx context.Context) ([]client.CollectionDefinition, error) {
+ txn := mustGetContextTxn(ctx)
+
cols, err := description.GetActiveCollections(ctx, txn)
if err != nil {
return nil, err
@@ -814,7 +1338,7 @@ func (db *db) getAllActiveDefinitions(ctx context.Context, txn datastore.Txn) ([
collection := db.newCollection(col, schema)
- err = collection.loadIndexes(ctx, txn)
+ err = collection.loadIndexes(ctx)
if err != nil {
return nil, err
}
@@ -843,19 +1367,20 @@ func (db *db) getAllActiveDefinitions(ctx context.Context, txn datastore.Txn) ([
//
// @todo: We probably need a lock on the collection for this kind of op since
// it hits every key and will cause Tx conflicts for concurrent Txs
-func (c *collection) GetAllDocIDs(ctx context.Context) (<-chan client.DocIDResult, error) {
- txn, err := c.getTxn(ctx, true)
+func (c *collection) GetAllDocIDs(
+ ctx context.Context,
+) (<-chan client.DocIDResult, error) {
+ ctx, _, err := ensureContextTxn(ctx, c.db, true)
if err != nil {
return nil, err
}
-
- return c.getAllDocIDsChan(ctx, txn)
+ return c.getAllDocIDsChan(ctx)
}
func (c *collection) getAllDocIDsChan(
ctx context.Context,
- txn datastore.Txn,
) (<-chan client.DocIDResult, error) {
+ txn := mustGetContextTxn(ctx)
prefix := core.PrimaryDataStoreKey{ // empty path for all keys prefix
CollectionRootID: c.Description().RootID,
}
@@ -871,10 +1396,10 @@ func (c *collection) getAllDocIDsChan(
go func() {
defer func() {
if err := q.Close(); err != nil {
- log.ErrorE(ctx, errFailedtoCloseQueryReqAllIDs, err)
+ log.ErrorContextE(ctx, errFailedtoCloseQueryReqAllIDs, err)
}
close(resCh)
- c.discardImplicitTxn(ctx, txn)
+ txn.Discard(ctx)
}()
for res := range q.Next() {
// check for Done on context first
@@ -896,12 +1421,28 @@ func (c *collection) getAllDocIDsChan(
docID, err := client.NewDocIDFromString(rawDocID)
if err != nil {
resCh <- client.DocIDResult{
- Err: res.Error,
+ Err: err,
}
return
}
- resCh <- client.DocIDResult{
- ID: docID,
+
+ canRead, err := c.checkAccessOfDocWithACP(
+ ctx,
+ acp.ReadPermission,
+ docID.String(),
+ )
+
+ if err != nil {
+ resCh <- client.DocIDResult{
+ Err: err,
+ }
+ return
+ }
+
+ if canRead {
+ resCh <- client.DocIDResult{
+ ID: docID,
+ }
}
}
}()
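
The goroutine above is a plain channel producer with a permission gate: an error aborts the stream, while documents the caller may not read are skipped silently rather than reported. The shape in isolation, with stand-in types:

type idResult struct {
	ID  string
	Err error
}

func streamIDs(ids []string, canRead func(string) (bool, error)) <-chan idResult {
	out := make(chan idResult)
	go func() {
		defer close(out)
		for _, id := range ids {
			ok, err := canRead(id)
			if err != nil {
				out <- idResult{Err: err}
				return // abort the stream on the first error
			}
			if ok { // unreadable documents are skipped, not errored
				out <- idResult{ID: id}
			}
		}
	}()
	return out
}
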
@@ -937,50 +1478,45 @@ func (c *collection) Definition() client.CollectionDefinition {
return c.def
}
-// WithTxn returns a new instance of the collection, with a transaction
-// handle instead of a raw DB handle.
-func (c *collection) WithTxn(txn datastore.Txn) client.Collection {
- return &collection{
- db: c.db,
- txn: immutable.Some(txn),
- def: c.def,
- indexes: c.indexes,
- fetcherFactory: c.fetcherFactory,
- }
-}
-
// Create a new document.
// Will verify the DocID/CID to ensure that the new document is correctly formatted.
-func (c *collection) Create(ctx context.Context, doc *client.Document) error {
- txn, err := c.getTxn(ctx, false)
+func (c *collection) Create(
+ ctx context.Context,
+ doc *client.Document,
+) error {
+ ctx, txn, err := ensureContextTxn(ctx, c.db, false)
if err != nil {
return err
}
- defer c.discardImplicitTxn(ctx, txn)
+ defer txn.Discard(ctx)
- err = c.create(ctx, txn, doc)
+ err = c.create(ctx, doc)
if err != nil {
return err
}
- return c.commitImplicitTxn(ctx, txn)
+
+ return txn.Commit(ctx)
}
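
Create establishes the transactional shape shared by Update, Save, Delete, and the index methods below: ensure a txn on the context, defer Discard, and Commit only once the work has succeeded. A condensed helper-style sketch of that shape (not part of this patch; it assumes that Discard after a successful Commit is a no-op, as is typical for datastore transactions):

func withWriteTxn(ctx context.Context, db *db, work func(ctx context.Context) error) error {
	ctx, txn, err := ensureContextTxn(ctx, db, false)
	if err != nil {
		return err
	}
	defer txn.Discard(ctx) // rolls back only on the error paths

	if err := work(ctx); err != nil {
		return err
	}
	return txn.Commit(ctx)
}
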
// CreateMany creates a collection of documents at once.
// Will verify the DocID/CID to ensure that the new documents are correctly formatted.
-func (c *collection) CreateMany(ctx context.Context, docs []*client.Document) error {
- txn, err := c.getTxn(ctx, false)
+func (c *collection) CreateMany(
+ ctx context.Context,
+ docs []*client.Document,
+) error {
+ ctx, txn, err := ensureContextTxn(ctx, c.db, false)
if err != nil {
return err
}
- defer c.discardImplicitTxn(ctx, txn)
+ defer txn.Discard(ctx)
for _, doc := range docs {
- err = c.create(ctx, txn, doc)
+ err = c.create(ctx, doc)
if err != nil {
return err
}
}
- return c.commitImplicitTxn(ctx, txn)
+ return txn.Commit(ctx)
}
func (c *collection) getDocIDAndPrimaryKeyFromDoc(
@@ -999,14 +1535,17 @@ func (c *collection) getDocIDAndPrimaryKeyFromDoc(
return docID, primaryKey, nil
}
-func (c *collection) create(ctx context.Context, txn datastore.Txn, doc *client.Document) error {
+func (c *collection) create(
+ ctx context.Context,
+ doc *client.Document,
+) error {
docID, primaryKey, err := c.getDocIDAndPrimaryKeyFromDoc(doc)
if err != nil {
return err
}
// check if doc already exists
- exists, isDeleted, err := c.exists(ctx, txn, primaryKey)
+ exists, isDeleted, err := c.exists(ctx, primaryKey)
if err != nil {
return err
}
@@ -1019,6 +1558,7 @@ func (c *collection) create(ctx context.Context, txn datastore.Txn, doc *client.
// write value object marker if we have an empty doc
if len(doc.Values()) == 0 {
+ txn := mustGetContextTxn(ctx)
valueKey := c.getDataStoreKeyFromDocID(docID)
err = txn.Datastore().Put(ctx, valueKey.ToDS(), []byte{base.ObjectMarker})
if err != nil {
@@ -1027,42 +1567,50 @@ func (c *collection) create(ctx context.Context, txn datastore.Txn, doc *client.
}
// write data to DB via MerkleClock/CRDT
- _, err = c.save(ctx, txn, doc, true)
+ _, err = c.save(ctx, doc, true)
if err != nil {
return err
}
- return c.indexNewDoc(ctx, txn, doc)
+ err = c.indexNewDoc(ctx, doc)
+ if err != nil {
+ return err
+ }
+
+ return c.registerDocWithACP(ctx, doc.ID().String())
}
// Update an existing document with the new values.
// Any field that needs to be removed or cleared should call doc.Clear(field) before.
// Any field that is nil/empty that hasn't called Clear will be ignored.
-func (c *collection) Update(ctx context.Context, doc *client.Document) error {
- txn, err := c.getTxn(ctx, false)
+func (c *collection) Update(
+ ctx context.Context,
+ doc *client.Document,
+) error {
+ ctx, txn, err := ensureContextTxn(ctx, c.db, false)
if err != nil {
return err
}
- defer c.discardImplicitTxn(ctx, txn)
+ defer txn.Discard(ctx)
primaryKey := c.getPrimaryKeyFromDocID(doc.ID())
- exists, isDeleted, err := c.exists(ctx, txn, primaryKey)
+ exists, isDeleted, err := c.exists(ctx, primaryKey)
if err != nil {
return err
}
if !exists {
- return client.ErrDocumentNotFound
+ return client.ErrDocumentNotFoundOrNotAuthorized
}
if isDeleted {
return NewErrDocumentDeleted(primaryKey.DocID)
}
- err = c.update(ctx, txn, doc)
+ err = c.update(ctx, doc)
if err != nil {
return err
}
- return c.commitImplicitTxn(ctx, txn)
+ return txn.Commit(ctx)
}
// Contract: DB Exists check is already performed, and a doc with the given ID exists.
@@ -1070,8 +1618,24 @@ func (c *collection) Update(ctx context.Context, doc *client.Document) error {
// or, just update everything regardless.
// Should probably be smart about the update due to the MerkleCRDT overhead, shouldn't
// add to the bloat.
-func (c *collection) update(ctx context.Context, txn datastore.Txn, doc *client.Document) error {
- _, err := c.save(ctx, txn, doc, false)
+func (c *collection) update(
+ ctx context.Context,
+ doc *client.Document,
+) error {
+ // Stop the update if the correct permissions aren't there.
+ canUpdate, err := c.checkAccessOfDocWithACP(
+ ctx,
+ acp.WritePermission,
+ doc.ID().String(),
+ )
+ if err != nil {
+ return err
+ }
+ if !canUpdate {
+ return client.ErrDocumentNotFoundOrNotAuthorized
+ }
+
+ _, err = c.save(ctx, doc, false)
if err != nil {
return err
}
@@ -1080,16 +1644,19 @@ func (c *collection) update(ctx context.Context, txn datastore.Txn, doc *client.
// Save a document into the db.
// Either by creating a new document or by updating an existing one
-func (c *collection) Save(ctx context.Context, doc *client.Document) error {
- txn, err := c.getTxn(ctx, false)
+func (c *collection) Save(
+ ctx context.Context,
+ doc *client.Document,
+) error {
+ ctx, txn, err := ensureContextTxn(ctx, c.db, false)
if err != nil {
return err
}
- defer c.discardImplicitTxn(ctx, txn)
+ defer txn.Discard(ctx)
// Check if document already exists with primary DS key.
primaryKey := c.getPrimaryKeyFromDocID(doc.ID())
- exists, isDeleted, err := c.exists(ctx, txn, primaryKey)
+ exists, isDeleted, err := c.exists(ctx, primaryKey)
if err != nil {
return err
}
@@ -1099,29 +1666,33 @@ func (c *collection) Save(ctx context.Context, doc *client.Document) error {
}
if exists {
- err = c.update(ctx, txn, doc)
+ err = c.update(ctx, doc)
} else {
- err = c.create(ctx, txn, doc)
+ err = c.create(ctx, doc)
}
if err != nil {
return err
}
- return c.commitImplicitTxn(ctx, txn)
+ return txn.Commit(ctx)
}
+// save saves the document state. save MUST not be called outside the `c.create`
+// and `c.update` methods as we wrap the acp logic within those methods. Calling
+// save elsewhere could cause the omission of acp checks.
func (c *collection) save(
ctx context.Context,
- txn datastore.Txn,
doc *client.Document,
isCreate bool,
) (cid.Cid, error) {
if !isCreate {
- err := c.updateIndexedDoc(ctx, txn, doc)
+ err := c.updateIndexedDoc(ctx, doc)
if err != nil {
return cid.Undef, err
}
}
+ txn := mustGetContextTxn(ctx)
+
// NOTE: We delay the final Clean() call until we know
// the commit on the transaction is successful. If we didn't
// wait, and just did it here, then *if* the commit fails down
@@ -1164,7 +1735,13 @@ func (c *collection) save(
if isSecondaryRelationID {
primaryId := val.Value().(string)
- err = c.patchPrimaryDoc(ctx, txn, c.Name().Value(), relationFieldDescription, primaryKey.DocID, primaryId)
+ err = c.patchPrimaryDoc(
+ ctx,
+ c.Name().Value(),
+ relationFieldDescription,
+ primaryKey.DocID,
+ primaryId,
+ )
if err != nil {
return cid.Undef, err
}
@@ -1174,7 +1751,12 @@ func (c *collection) save(
continue
}
- err = c.validateOneToOneLinkDoesntAlreadyExist(ctx, txn, doc.ID().String(), fieldDescription, val.Value())
+ err = c.validateOneToOneLinkDoesntAlreadyExist(
+ ctx,
+ doc.ID().String(),
+ fieldDescription,
+ val.Value(),
+ )
if err != nil {
return cid.Undef, err
}
@@ -1206,7 +1788,6 @@ func (c *collection) save(
headNode, priority, err := c.saveCompositeToMerkleCRDT(
ctx,
- txn,
primaryKey.ToDataStoreKey(),
links,
client.Active,
@@ -1240,7 +1821,6 @@ func (c *collection) save(
func (c *collection) validateOneToOneLinkDoesntAlreadyExist(
ctx context.Context,
- txn datastore.Txn,
docID string,
fieldDescription client.FieldDefinition,
value any,
@@ -1259,22 +1839,22 @@ func (c *collection) validateOneToOneLinkDoesntAlreadyExist(
if !ok {
return client.NewErrFieldNotExist(strings.TrimSuffix(fieldDescription.Name, request.RelatedObjectID))
}
- if objFieldDescription.Kind != client.FieldKind_FOREIGN_OBJECT {
+ if !(objFieldDescription.Kind.IsObject() && !objFieldDescription.Kind.IsArray()) {
return nil
}
- otherCol, err := c.db.getCollectionByName(ctx, txn, objFieldDescription.Schema)
+ otherCol, err := c.db.getCollectionByName(ctx, objFieldDescription.Kind.Underlying())
if err != nil {
return err
}
- otherSchema := otherCol.Schema()
otherObjFieldDescription, _ := otherCol.Description().GetFieldByRelation(
fieldDescription.RelationName,
c.Name().Value(),
objFieldDescription.Name,
- &otherSchema,
)
- if otherObjFieldDescription.Kind != client.FieldKind_FOREIGN_OBJECT {
+ if !(otherObjFieldDescription.Kind.HasValue() &&
+ otherObjFieldDescription.Kind.Value().IsObject() &&
+ !otherObjFieldDescription.Kind.Value().IsArray()) {
// If the other field is not an object field then this is not a one to one relation and we can continue
return nil
}
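
The selection-plan scan that follows enforces one-one uniqueness: before linking this document to the target value, any other document already holding the same link must cause a rejection. The rule itself reduces to a small check, sketched here over a hypothetical value-to-holder map standing in for that scan:

// linkAlreadyTaken reports whether some other document already holds the
// one-one link to value; holders maps a linked value to the docID that
// currently holds it.
func linkAlreadyTaken(holders map[string]string, docID, value string) bool {
	holder, ok := holders[value]
	return ok && holder != docID
}
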
@@ -1286,7 +1866,7 @@ func (c *collection) validateOneToOneLinkDoesntAlreadyExist(
fieldDescription.Name,
value,
)
- selectionPlan, err := c.makeSelectionPlan(ctx, txn, filter)
+ selectionPlan, err := c.makeSelectionPlan(ctx, filter)
if err != nil {
return err
}
@@ -1338,54 +1918,61 @@ func (c *collection) validateOneToOneLinkDoesntAlreadyExist(
// otherwise will return false, along with an error, if it cannot.
// If the document doesn't exist, then it will return false, and an ErrDocumentNotFoundOrNotAuthorized error.
// This operation will delete all state relating to the given DocID. This includes data, block, and head storage.
-func (c *collection) Delete(ctx context.Context, docID client.DocID) (bool, error) {
- txn, err := c.getTxn(ctx, false)
+func (c *collection) Delete(
+ ctx context.Context,
+ docID client.DocID,
+) (bool, error) {
+ ctx, txn, err := ensureContextTxn(ctx, c.db, false)
if err != nil {
return false, err
}
- defer c.discardImplicitTxn(ctx, txn)
+ defer txn.Discard(ctx)
primaryKey := c.getPrimaryKeyFromDocID(docID)
- exists, isDeleted, err := c.exists(ctx, txn, primaryKey)
- if err != nil {
- return false, err
- }
- if !exists || isDeleted {
- return false, client.ErrDocumentNotFound
- }
- if isDeleted {
- return false, NewErrDocumentDeleted(primaryKey.DocID)
- }
- err = c.applyDelete(ctx, txn, primaryKey)
+ err = c.applyDelete(ctx, primaryKey)
if err != nil {
return false, err
}
- return true, c.commitImplicitTxn(ctx, txn)
+ return true, txn.Commit(ctx)
}
// Exists checks if a given document exists with the supplied DocID.
-func (c *collection) Exists(ctx context.Context, docID client.DocID) (bool, error) {
- txn, err := c.getTxn(ctx, false)
+func (c *collection) Exists(
+ ctx context.Context,
+ docID client.DocID,
+) (bool, error) {
+ ctx, txn, err := ensureContextTxn(ctx, c.db, false)
if err != nil {
return false, err
}
- defer c.discardImplicitTxn(ctx, txn)
+ defer txn.Discard(ctx)
primaryKey := c.getPrimaryKeyFromDocID(docID)
- exists, isDeleted, err := c.exists(ctx, txn, primaryKey)
+ exists, isDeleted, err := c.exists(ctx, primaryKey)
if err != nil && !errors.Is(err, ds.ErrNotFound) {
return false, err
}
- return exists && !isDeleted, c.commitImplicitTxn(ctx, txn)
+ return exists && !isDeleted, txn.Commit(ctx)
}
// check if a document exists with the given primary key
func (c *collection) exists(
ctx context.Context,
- txn datastore.Txn,
primaryKey core.PrimaryDataStoreKey,
) (exists bool, isDeleted bool, err error) {
+ canRead, err := c.checkAccessOfDocWithACP(
+ ctx,
+ acp.ReadPermission,
+ primaryKey.DocID,
+ )
+ if err != nil {
+ return false, false, err
+ } else if !canRead {
+ return false, false, nil
+ }
+
+ txn := mustGetContextTxn(ctx)
val, err := txn.Datastore().Get(ctx, primaryKey.ToDS())
if err != nil && errors.Is(err, ds.ErrNotFound) {
return false, false, nil
@@ -1399,13 +1986,17 @@ func (c *collection) exists(
return true, false, nil
}
+// saveCompositeToMerkleCRDT saves the composite to the merkle CRDT.
+// saveCompositeToMerkleCRDT MUST not be called outside the `c.save`
+// and `c.applyDelete` methods as we wrap the acp logic around those methods.
+// Calling it elsewhere could cause the omission of acp checks.
func (c *collection) saveCompositeToMerkleCRDT(
ctx context.Context,
- txn datastore.Txn,
dsKey core.DataStoreKey,
links []core.DAGLink,
status client.DocumentStatus,
) (ipld.Node, uint64, error) {
+ txn := mustGetContextTxn(ctx)
dsKey = dsKey.WithFieldId(core.COMPOSITE_NAMESPACE)
merkleCRDT := merklecrdt.NewMerkleCompositeDAG(
txn,
@@ -1421,35 +2012,6 @@ func (c *collection) saveCompositeToMerkleCRDT(
return merkleCRDT.Save(ctx, links)
}
-// getTxn gets or creates a new transaction from the underlying db.
-// If the collection already has a txn, return the existing one.
-// Otherwise, create a new implicit transaction.
-func (c *collection) getTxn(ctx context.Context, readonly bool) (datastore.Txn, error) {
- if c.txn.HasValue() {
- return c.txn.Value(), nil
- }
- return c.db.NewTxn(ctx, readonly)
-}
-
-// discardImplicitTxn is a proxy function used by the collection to execute the Discard()
-// transaction function only if it's an implicit transaction.
-//
-// Implicit transactions are transactions that are created *during* an operation execution as a side effect.
-//
-// Explicit transactions are provided to the collection object via the "WithTxn(...)" function.
-func (c *collection) discardImplicitTxn(ctx context.Context, txn datastore.Txn) {
- if !c.txn.HasValue() {
- txn.Discard(ctx)
- }
-}
-
-func (c *collection) commitImplicitTxn(ctx context.Context, txn datastore.Txn) error {
- if !c.txn.HasValue() {
- return txn.Commit(ctx)
- }
- return nil
-}
-
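
The removed helpers stored an optional explicit txn on the collection value; their replacement carries the txn on the context instead. One plausible shape for those context helpers, sketched under assumed semantics (the names mustGetContextTxn/ensureContextTxn come from this patch, these bodies do not): a caller-supplied txn is wrapped so that the collection's own Commit/Discard calls become no-ops, leaving the caller in charge of the transaction lifecycle.

type txnContextKey struct{}

// explicitTxn makes Commit/Discard no-ops so that an implicit-txn code path
// cannot end a transaction it does not own.
type explicitTxn struct{ datastore.Txn }

func (explicitTxn) Commit(ctx context.Context) error { return nil }
func (explicitTxn) Discard(ctx context.Context)      {}

func ensureContextTxnSketch(
	ctx context.Context,
	db *db,
	readonly bool,
) (context.Context, datastore.Txn, error) {
	if txn, ok := ctx.Value(txnContextKey{}).(datastore.Txn); ok {
		return ctx, explicitTxn{txn}, nil
	}
	txn, err := db.NewTxn(ctx, readonly)
	if err != nil {
		return nil, nil, err
	}
	return context.WithValue(ctx, txnContextKey{}, txn), txn, nil
}

func mustGetContextTxnSketch(ctx context.Context) datastore.Txn {
	return ctx.Value(txnContextKey{}).(datastore.Txn) // panics if unset
}
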
func (c *collection) getPrimaryKeyFromDocID(docID client.DocID) core.PrimaryDataStoreKey {
return core.PrimaryDataStoreKey{
CollectionRootID: c.Description().RootID,
@@ -1466,7 +2028,7 @@ func (c *collection) getDataStoreKeyFromDocID(docID client.DocID) core.DataStore
}
func (c *collection) tryGetFieldKey(primaryKey core.PrimaryDataStoreKey, fieldName string) (core.DataStoreKey, bool) {
- fieldId, hasField := c.tryGetSchemaFieldID(fieldName)
+ fieldId, hasField := c.tryGetFieldID(fieldName)
if !hasField {
return core.DataStoreKey{}, false
}
@@ -1478,9 +2040,9 @@ func (c *collection) tryGetFieldKey(primaryKey core.PrimaryDataStoreKey, fieldNa
}, true
}
-// tryGetSchemaFieldID returns the FieldID of the given fieldName.
+// tryGetFieldID returns the FieldID of the given fieldName.
// Will return false if the field is not found.
-func (c *collection) tryGetSchemaFieldID(fieldName string) (uint32, bool) {
+func (c *collection) tryGetFieldID(fieldName string) (uint32, bool) {
for _, field := range c.Definition().GetFields() {
if field.Name == fieldName {
if field.Kind.IsObject() || field.Kind.IsObjectArray() {
diff --git a/db/collection_acp.go b/db/collection_acp.go
new file mode 100644
index 0000000000..4a273e907e
--- /dev/null
+++ b/db/collection_acp.go
@@ -0,0 +1,67 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package db
+
+import (
+ "context"
+
+ "github.com/sourcenetwork/defradb/acp"
+ "github.com/sourcenetwork/defradb/db/permission"
+)
+
+// registerDocWithACP handles the registration of the document with acp.
+// The registering is done at document creation on the collection.
+//
+// According to our access logic we have these components to worry about:
+// (1) the request is permissioned (has an identity signature),
+// (2) the collection is permissioned (has a policy),
+// (3) acp is available (acp is enabled).
+//
+// The document is only registered if all (1) (2) and (3) are true.
+//
+// Otherwise, nothing is registered with the acp system.
+func (c *collection) registerDocWithACP(
+ ctx context.Context,
+ docID string,
+) error {
+ // If acp is not available, then no document is registered.
+ if !c.db.acp.HasValue() {
+ return nil
+ }
+ identity := GetContextIdentity(ctx)
+ return permission.RegisterDocOnCollectionWithACP(
+ ctx,
+ identity,
+ c.db.acp.Value(),
+ c,
+ docID,
+ )
+}
+
+func (c *collection) checkAccessOfDocWithACP(
+ ctx context.Context,
+ dpiPermission acp.DPIPermission,
+ docID string,
+) (bool, error) {
+ // If acp is not available, then we have unrestricted access.
+ if !c.db.acp.HasValue() {
+ return true, nil
+ }
+ identity := GetContextIdentity(ctx)
+ return permission.CheckAccessOfDocOnCollectionWithACP(
+ ctx,
+ identity,
+ c.db.acp.Value(),
+ c,
+ dpiPermission,
+ docID,
+ )
+}
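
Note the deliberate error choice paired with these helpers throughout the patch: a failed access check surfaces as client.ErrDocumentNotFoundOrNotAuthorized, identical to a genuinely missing document, so an unauthorized caller cannot probe for document existence. A condensed sketch of how checkAccessOfDocWithACP gates a write:

func guardedWrite(ctx context.Context, c *collection, docID string, write func() error) error {
	canWrite, err := c.checkAccessOfDocWithACP(ctx, acp.WritePermission, docID)
	if err != nil {
		return err
	}
	if !canWrite {
		// Indistinguishable from "not found" by design.
		return client.ErrDocumentNotFoundOrNotAuthorized
	}
	return write()
}
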
diff --git a/db/collection_delete.go b/db/collection_delete.go
index 785b2830d7..62ebd7f167 100644
--- a/db/collection_delete.go
+++ b/db/collection_delete.go
@@ -13,156 +13,39 @@ package db
import (
"context"
+ "github.com/sourcenetwork/defradb/acp"
"github.com/sourcenetwork/defradb/client"
- "github.com/sourcenetwork/defradb/client/request"
"github.com/sourcenetwork/defradb/core"
- "github.com/sourcenetwork/defradb/datastore"
"github.com/sourcenetwork/defradb/events"
"github.com/sourcenetwork/defradb/merkle/clock"
)
-// DeleteWith deletes a target document.
-//
-// Target can be a Filter statement, a single DocID, a single document,
-// an array of DocIDs, or an array of documents.
-//
-// If you want more type safety, use the respective typed versions of Delete.
-// Eg: DeleteWithFilter or DeleteWithDocID
-func (c *collection) DeleteWith(
- ctx context.Context,
- target any,
-) (*client.DeleteResult, error) {
- switch t := target.(type) {
- case string, map[string]any, *request.Filter:
- return c.DeleteWithFilter(ctx, t)
- case client.DocID:
- return c.DeleteWithDocID(ctx, t)
- case []client.DocID:
- return c.DeleteWithDocIDs(ctx, t)
- default:
- return nil, client.ErrInvalidDeleteTarget
- }
-}
-
-// DeleteWithDocID deletes using a DocID to target a single document for delete.
-func (c *collection) DeleteWithDocID(
- ctx context.Context,
- docID client.DocID,
-) (*client.DeleteResult, error) {
- txn, err := c.getTxn(ctx, false)
- if err != nil {
- return nil, err
- }
-
- defer c.discardImplicitTxn(ctx, txn)
-
- dsKey := c.getPrimaryKeyFromDocID(docID)
- res, err := c.deleteWithKey(ctx, txn, dsKey)
- if err != nil {
- return nil, err
- }
-
- return res, c.commitImplicitTxn(ctx, txn)
-}
-
-// DeleteWithDocIDs is the same as DeleteWithDocID but accepts multiple DocIDs as a slice.
-func (c *collection) DeleteWithDocIDs(
- ctx context.Context,
- docIDs []client.DocID,
-) (*client.DeleteResult, error) {
- txn, err := c.getTxn(ctx, false)
- if err != nil {
- return nil, err
- }
-
- defer c.discardImplicitTxn(ctx, txn)
-
- res, err := c.deleteWithIDs(ctx, txn, docIDs, client.Deleted)
- if err != nil {
- return nil, err
- }
-
- return res, c.commitImplicitTxn(ctx, txn)
-}
-
// DeleteWithFilter deletes using a filter to target documents for delete.
func (c *collection) DeleteWithFilter(
ctx context.Context,
filter any,
) (*client.DeleteResult, error) {
- txn, err := c.getTxn(ctx, false)
- if err != nil {
- return nil, err
- }
-
- defer c.discardImplicitTxn(ctx, txn)
-
- res, err := c.deleteWithFilter(ctx, txn, filter, client.Deleted)
+ ctx, txn, err := ensureContextTxn(ctx, c.db, false)
if err != nil {
return nil, err
}
+ defer txn.Discard(ctx)
- return res, c.commitImplicitTxn(ctx, txn)
-}
-
-func (c *collection) deleteWithKey(
- ctx context.Context,
- txn datastore.Txn,
- key core.PrimaryDataStoreKey,
-) (*client.DeleteResult, error) {
- // Check the key we have been given to delete with actually has a corresponding
- // document (i.e. document actually exists in the collection).
- err := c.applyDelete(ctx, txn, key)
+ res, err := c.deleteWithFilter(ctx, filter, client.Deleted)
if err != nil {
return nil, err
}
- // Upon successful deletion, record a summary.
- results := &client.DeleteResult{
- Count: 1,
- DocIDs: []string{key.DocID},
- }
-
- return results, nil
-}
-
-func (c *collection) deleteWithIDs(
- ctx context.Context,
- txn datastore.Txn,
- docIDs []client.DocID,
- _ client.DocumentStatus,
-) (*client.DeleteResult, error) {
- results := &client.DeleteResult{
- DocIDs: make([]string, 0),
- }
-
- for _, docID := range docIDs {
- primaryKey := c.getPrimaryKeyFromDocID(docID)
-
- // Apply the function that will perform the full deletion of this document.
- err := c.applyDelete(ctx, txn, primaryKey)
- if err != nil {
- return nil, err
- }
-
- // Add this deleted docID to our list.
- results.DocIDs = append(results.DocIDs, docID.String())
- }
-
- // Upon successful deletion, record a summary of how many we deleted.
- results.Count = int64(len(results.DocIDs))
-
- return results, nil
+ return res, txn.Commit(ctx)
}
func (c *collection) deleteWithFilter(
ctx context.Context,
- txn datastore.Txn,
filter any,
_ client.DocumentStatus,
) (*client.DeleteResult, error) {
// Make a selection plan that will scan through only the documents with matching filter.
- selectionPlan, err := c.makeSelectionPlan(ctx, txn, filter)
+ selectionPlan, err := c.makeSelectionPlan(ctx, filter)
if err != nil {
return nil, err
}
@@ -179,7 +62,7 @@ func (c *collection) deleteWithFilter(
// If the plan isn't properly closed at any exit point log the error.
defer func() {
if err := selectionPlan.Close(); err != nil {
- log.ErrorE(ctx, "Failed to close the request plan, after filter delete", err)
+ log.ErrorContextE(ctx, "Failed to close the request plan, after filter delete", err)
}
}()
@@ -210,7 +93,7 @@ func (c *collection) deleteWithFilter(
}
// Delete the document that is associated with this DS key we got from the filter.
- err = c.applyDelete(ctx, txn, primaryKey)
+ err = c.applyDelete(ctx, primaryKey)
if err != nil {
return nil, err
}
@@ -226,22 +109,36 @@ func (c *collection) deleteWithFilter(
func (c *collection) applyDelete(
ctx context.Context,
- txn datastore.Txn,
primaryKey core.PrimaryDataStoreKey,
) error {
- found, isDeleted, err := c.exists(ctx, txn, primaryKey)
+ // Must also have read permission to delete, in order to check if the document exists.
+ found, isDeleted, err := c.exists(ctx, primaryKey)
if err != nil {
return err
}
if !found {
- return client.ErrDocumentNotFound
+ return client.ErrDocumentNotFoundOrNotAuthorized
}
if isDeleted {
return NewErrDocumentDeleted(primaryKey.DocID)
}
- dsKey := primaryKey.ToDataStoreKey()
+ // Stop deletion of the document if the correct permissions aren't there.
+ canDelete, err := c.checkAccessOfDocWithACP(
+ ctx,
+ acp.WritePermission,
+ primaryKey.DocID,
+ )
+ if err != nil {
+ return err
+ }
+ if !canDelete {
+ return client.ErrDocumentNotFoundOrNotAuthorized
+ }
+
+ txn := mustGetContextTxn(ctx)
+ dsKey := primaryKey.ToDataStoreKey()
headset := clock.NewHeadSet(
txn.Headstore(),
dsKey.WithFieldId(core.COMPOSITE_NAMESPACE).ToHeadStoreKey(),
@@ -261,7 +158,6 @@ func (c *collection) applyDelete(
headNode, priority, err := c.saveCompositeToMerkleCRDT(
ctx,
- txn,
dsKey,
dagLinks,
client.Deleted,
diff --git a/db/collection_get.go b/db/collection_get.go
index cf245fc678..75d3d2826b 100644
--- a/db/collection_get.go
+++ b/db/collection_get.go
@@ -15,46 +15,55 @@ import (
"github.com/sourcenetwork/defradb/client"
"github.com/sourcenetwork/defradb/core"
- "github.com/sourcenetwork/defradb/datastore"
"github.com/sourcenetwork/defradb/db/base"
"github.com/sourcenetwork/defradb/db/fetcher"
)
-func (c *collection) Get(ctx context.Context, docID client.DocID, showDeleted bool) (*client.Document, error) {
+func (c *collection) Get(
+ ctx context.Context,
+ docID client.DocID,
+ showDeleted bool,
+) (*client.Document, error) {
// create txn
- txn, err := c.getTxn(ctx, true)
+ ctx, txn, err := ensureContextTxn(ctx, c.db, true)
if err != nil {
return nil, err
}
- defer c.discardImplicitTxn(ctx, txn)
+ defer txn.Discard(ctx)
primaryKey := c.getPrimaryKeyFromDocID(docID)
- found, isDeleted, err := c.exists(ctx, txn, primaryKey)
+ found, isDeleted, err := c.exists(ctx, primaryKey)
if err != nil {
return nil, err
}
if !found || (isDeleted && !showDeleted) {
- return nil, client.ErrDocumentNotFound
+ return nil, client.ErrDocumentNotFoundOrNotAuthorized
}
- doc, err := c.get(ctx, txn, primaryKey, nil, showDeleted)
+ doc, err := c.get(ctx, primaryKey, nil, showDeleted)
if err != nil {
return nil, err
}
- return doc, c.commitImplicitTxn(ctx, txn)
+
+ if doc == nil {
+ return nil, client.ErrDocumentNotFoundOrNotAuthorized
+ }
+
+ return doc, txn.Commit(ctx)
}
func (c *collection) get(
ctx context.Context,
- txn datastore.Txn,
primaryKey core.PrimaryDataStoreKey,
fields []client.FieldDefinition,
showDeleted bool,
) (*client.Document, error) {
+ txn := mustGetContextTxn(ctx)
+ identity := GetContextIdentity(ctx)
// create a new document fetcher
df := c.newFetcher()
// initialize it with the primary index
- err := df.Init(ctx, txn, c, fields, nil, nil, false, showDeleted)
+ err := df.Init(ctx, identity, txn, c.db.acp, c, fields, nil, nil, false, showDeleted)
if err != nil {
_ = df.Close()
return nil, err
@@ -85,7 +94,7 @@ func (c *collection) get(
return nil, nil
}
- doc, err := fetcher.Decode(encodedDoc, c.Schema())
+ doc, err := fetcher.Decode(encodedDoc, c.Definition())
if err != nil {
return nil, err
}
diff --git a/db/collection_index.go b/db/collection_index.go
index 7fb036498a..2327ae027a 100644
--- a/db/collection_index.go
+++ b/db/collection_index.go
@@ -32,36 +32,33 @@ import (
// createCollectionIndex creates a new collection index and saves it to the database in its system store.
func (db *db) createCollectionIndex(
ctx context.Context,
- txn datastore.Txn,
collectionName string,
desc client.IndexDescription,
) (client.IndexDescription, error) {
- col, err := db.getCollectionByName(ctx, txn, collectionName)
+ col, err := db.getCollectionByName(ctx, collectionName)
if err != nil {
return client.IndexDescription{}, NewErrCanNotReadCollection(collectionName, err)
}
- col = col.WithTxn(txn)
return col.CreateIndex(ctx, desc)
}
func (db *db) dropCollectionIndex(
ctx context.Context,
- txn datastore.Txn,
collectionName, indexName string,
) error {
- col, err := db.getCollectionByName(ctx, txn, collectionName)
+ col, err := db.getCollectionByName(ctx, collectionName)
if err != nil {
return NewErrCanNotReadCollection(collectionName, err)
}
- col = col.WithTxn(txn)
return col.DropIndex(ctx, indexName)
}
// getAllIndexDescriptions returns all the index descriptions in the database.
func (db *db) getAllIndexDescriptions(
ctx context.Context,
- txn datastore.Txn,
) (map[client.CollectionName][]client.IndexDescription, error) {
+ // callers of this function must set a context transaction
+ txn := mustGetContextTxn(ctx)
prefix := core.NewCollectionIndexKey(immutable.None[uint32](), "")
keys, indexDescriptions, err := datastore.DeserializePrefix[client.IndexDescription](ctx,
@@ -95,12 +92,16 @@ func (db *db) getAllIndexDescriptions(
func (db *db) fetchCollectionIndexDescriptions(
ctx context.Context,
- txn datastore.Txn,
colID uint32,
) ([]client.IndexDescription, error) {
+ // callers of this function must set a context transaction
+ txn := mustGetContextTxn(ctx)
prefix := core.NewCollectionIndexKey(immutable.Some(colID), "")
- _, indexDescriptions, err := datastore.DeserializePrefix[client.IndexDescription](ctx,
- prefix.ToString(), txn.Systemstore())
+ _, indexDescriptions, err := datastore.DeserializePrefix[client.IndexDescription](
+ ctx,
+ prefix.ToString(),
+ txn.Systemstore(),
+ )
if err != nil {
return nil, err
}
@@ -108,59 +109,61 @@ func (db *db) fetchCollectionIndexDescriptions(
}
func (c *collection) CreateDocIndex(ctx context.Context, doc *client.Document) error {
- txn, err := c.getTxn(ctx, false)
+ ctx, txn, err := ensureContextTxn(ctx, c.db, false)
if err != nil {
return err
}
- defer c.discardImplicitTxn(ctx, txn)
+ defer txn.Discard(ctx)
- err = c.indexNewDoc(ctx, txn, doc)
+ err = c.indexNewDoc(ctx, doc)
if err != nil {
return err
}
- return c.commitImplicitTxn(ctx, txn)
+ return txn.Commit(ctx)
}
func (c *collection) UpdateDocIndex(ctx context.Context, oldDoc, newDoc *client.Document) error {
- txn, err := c.getTxn(ctx, false)
+ ctx, txn, err := ensureContextTxn(ctx, c.db, false)
if err != nil {
return err
}
- defer c.discardImplicitTxn(ctx, txn)
+ defer txn.Discard(ctx)
- err = c.deleteIndexedDoc(ctx, txn, oldDoc)
+ err = c.deleteIndexedDoc(ctx, oldDoc)
if err != nil {
return err
}
- err = c.indexNewDoc(ctx, txn, newDoc)
+ err = c.indexNewDoc(ctx, newDoc)
if err != nil {
return err
}
- return c.commitImplicitTxn(ctx, txn)
+ return txn.Commit(ctx)
}
func (c *collection) DeleteDocIndex(ctx context.Context, doc *client.Document) error {
- txn, err := c.getTxn(ctx, false)
+ ctx, txn, err := ensureContextTxn(ctx, c.db, false)
if err != nil {
return err
}
- defer c.discardImplicitTxn(ctx, txn)
+ defer txn.Discard(ctx)
- err = c.deleteIndexedDoc(ctx, txn, doc)
+ err = c.deleteIndexedDoc(ctx, doc)
if err != nil {
return err
}
- return c.commitImplicitTxn(ctx, txn)
+ return txn.Commit(ctx)
}
-func (c *collection) indexNewDoc(ctx context.Context, txn datastore.Txn, doc *client.Document) error {
- err := c.loadIndexes(ctx, txn)
+func (c *collection) indexNewDoc(ctx context.Context, doc *client.Document) error {
+ err := c.loadIndexes(ctx)
if err != nil {
return err
}
+ // callers of this function must set a context transaction
+ txn := mustGetContextTxn(ctx)
for _, index := range c.indexes {
err = index.Save(ctx, txn, doc)
if err != nil {
@@ -172,16 +175,16 @@ func (c *collection) indexNewDoc(ctx context.Context, txn datastore.Txn, doc *cl
func (c *collection) updateIndexedDoc(
ctx context.Context,
- txn datastore.Txn,
doc *client.Document,
) error {
- err := c.loadIndexes(ctx, txn)
+ err := c.loadIndexes(ctx)
if err != nil {
return err
}
+ // TODO-ACP: https://github.com/sourcenetwork/defradb/issues/2365 - ACP <> Indexing, possibly also check
+ // and handle the case when oldDoc == nil (it will be nil if the document is inaccessible).
oldDoc, err := c.get(
ctx,
- txn,
c.getPrimaryKeyFromDocID(doc.ID()),
c.Definition().CollectIndexedFields(),
false,
@@ -189,6 +192,7 @@ func (c *collection) updateIndexedDoc(
if err != nil {
return err
}
+ txn := mustGetContextTxn(ctx)
for _, index := range c.indexes {
err = index.Update(ctx, txn, oldDoc, doc)
if err != nil {
@@ -200,13 +204,13 @@ func (c *collection) updateIndexedDoc(
func (c *collection) deleteIndexedDoc(
ctx context.Context,
- txn datastore.Txn,
doc *client.Document,
) error {
- err := c.loadIndexes(ctx, txn)
+ err := c.loadIndexes(ctx)
if err != nil {
return err
}
+ txn := mustGetContextTxn(ctx)
for _, index := range c.indexes {
err = index.Delete(ctx, txn, doc)
if err != nil {
@@ -235,24 +239,29 @@ func (c *collection) CreateIndex(
ctx context.Context,
desc client.IndexDescription,
) (client.IndexDescription, error) {
- txn, err := c.getTxn(ctx, false)
+ ctx, txn, err := ensureContextTxn(ctx, c.db, false)
if err != nil {
return client.IndexDescription{}, err
}
- defer c.discardImplicitTxn(ctx, txn)
+ defer txn.Discard(ctx)
- index, err := c.createIndex(ctx, txn, desc)
+ index, err := c.createIndex(ctx, desc)
if err != nil {
return client.IndexDescription{}, err
}
- return index.Description(), c.commitImplicitTxn(ctx, txn)
+ return index.Description(), txn.Commit(ctx)
}
func (c *collection) createIndex(
ctx context.Context,
- txn datastore.Txn,
desc client.IndexDescription,
) (CollectionIndex, error) {
+ // Don't allow creating an index on a permissioned collection until the following is implemented.
+ // TODO-ACP: ACP <> INDEX https://github.com/sourcenetwork/defradb/issues/2365
+ if c.Description().Policy.HasValue() {
+ return nil, ErrCanNotCreateIndexOnCollectionWithPolicy
+ }
+
if desc.Name != "" && !schema.IsValidIndexName(desc.Name) {
return nil, schema.NewErrIndexWithInvalidName(desc.Name)
}
@@ -266,20 +275,19 @@ func (c *collection) createIndex(
return nil, err
}
- indexKey, err := c.generateIndexNameIfNeededAndCreateKey(ctx, txn, &desc)
+ indexKey, err := c.generateIndexNameIfNeededAndCreateKey(ctx, &desc)
if err != nil {
return nil, err
}
colSeq, err := c.db.getSequence(
ctx,
- txn,
core.NewIndexIDSequenceKey(c.ID()),
)
if err != nil {
return nil, err
}
- colID, err := colSeq.next(ctx, txn)
+ colID, err := colSeq.next(ctx)
if err != nil {
return nil, err
}
@@ -290,6 +298,7 @@ func (c *collection) createIndex(
return nil, err
}
+ txn := mustGetContextTxn(ctx)
err = txn.Systemstore().Put(ctx, indexKey.ToDS(), buf)
if err != nil {
return nil, err
@@ -300,7 +309,7 @@ func (c *collection) createIndex(
}
c.def.Description.Indexes = append(c.def.Description.Indexes, colIndex.Description())
c.indexes = append(c.indexes, colIndex)
- err = c.indexExistingDocs(ctx, txn, colIndex)
+ err = c.indexExistingDocs(ctx, colIndex)
if err != nil {
removeErr := colIndex.RemoveAll(ctx, txn)
return nil, errors.Join(err, removeErr)
@@ -310,12 +319,25 @@ func (c *collection) createIndex(
func (c *collection) iterateAllDocs(
ctx context.Context,
- txn datastore.Txn,
fields []client.FieldDefinition,
exec func(doc *client.Document) error,
) error {
+ txn := mustGetContextTxn(ctx)
+ identity := GetContextIdentity(ctx)
+
df := c.newFetcher()
- err := df.Init(ctx, txn, c, fields, nil, nil, false, false)
+ err := df.Init(
+ ctx,
+ identity,
+ txn,
+ c.db.acp,
+ c,
+ fields,
+ nil,
+ nil,
+ false,
+ false,
+ )
if err != nil {
return errors.Join(err, df.Close())
}
@@ -336,7 +358,7 @@ func (c *collection) iterateAllDocs(
break
}
- doc, err := fetcher.Decode(encodedDoc, c.Schema())
+ doc, err := fetcher.Decode(encodedDoc, c.Definition())
if err != nil {
return errors.Join(err, df.Close())
}
@@ -352,7 +374,6 @@ func (c *collection) iterateAllDocs(
func (c *collection) indexExistingDocs(
ctx context.Context,
- txn datastore.Txn,
index CollectionIndex,
) error {
fields := make([]client.FieldDefinition, 0, 1)
@@ -362,8 +383,8 @@ func (c *collection) indexExistingDocs(
fields = append(fields, colField)
}
}
-
- return c.iterateAllDocs(ctx, txn, fields, func(doc *client.Document) error {
+ txn := mustGetContextTxn(ctx)
+ return c.iterateAllDocs(ctx, fields, func(doc *client.Document) error {
return index.Save(ctx, txn, doc)
})
}
@@ -374,24 +395,25 @@ func (c *collection) indexExistingDocs(
//
// All index artifacts for existing documents related the index will be removed.
func (c *collection) DropIndex(ctx context.Context, indexName string) error {
- txn, err := c.getTxn(ctx, false)
+ ctx, txn, err := ensureContextTxn(ctx, c.db, false)
if err != nil {
return err
}
- defer c.discardImplicitTxn(ctx, txn)
+ defer txn.Discard(ctx)
- err = c.dropIndex(ctx, txn, indexName)
+ err = c.dropIndex(ctx, indexName)
if err != nil {
return err
}
- return c.commitImplicitTxn(ctx, txn)
+ return txn.Commit(ctx)
}
-func (c *collection) dropIndex(ctx context.Context, txn datastore.Txn, indexName string) error {
- err := c.loadIndexes(ctx, txn)
+func (c *collection) dropIndex(ctx context.Context, indexName string) error {
+ err := c.loadIndexes(ctx)
if err != nil {
return err
}
+ txn := mustGetContextTxn(ctx)
var didFind bool
for i := range c.indexes {
@@ -424,7 +446,9 @@ func (c *collection) dropIndex(ctx context.Context, txn datastore.Txn, indexName
return nil
}
-func (c *collection) dropAllIndexes(ctx context.Context, txn datastore.Txn) error {
+func (c *collection) dropAllIndexes(ctx context.Context) error {
+ // callers of this function must set a context transaction
+ txn := mustGetContextTxn(ctx)
prefix := core.NewCollectionIndexKey(immutable.Some(c.ID()), "")
keys, err := datastore.FetchKeysForPrefix(ctx, prefix.ToString(), txn.Systemstore())
@@ -442,8 +466,8 @@ func (c *collection) dropAllIndexes(ctx context.Context, txn datastore.Txn) erro
return err
}
-func (c *collection) loadIndexes(ctx context.Context, txn datastore.Txn) error {
- indexDescriptions, err := c.db.fetchCollectionIndexDescriptions(ctx, txn, c.ID())
+func (c *collection) loadIndexes(ctx context.Context) error {
+ indexDescriptions, err := c.db.fetchCollectionIndexDescriptions(ctx, c.ID())
if err != nil {
return err
}
@@ -462,13 +486,13 @@ func (c *collection) loadIndexes(ctx context.Context, txn datastore.Txn) error {
// GetIndexes returns all indexes for the collection.
func (c *collection) GetIndexes(ctx context.Context) ([]client.IndexDescription, error) {
- txn, err := c.getTxn(ctx, false)
+ ctx, txn, err := ensureContextTxn(ctx, c.db, false)
if err != nil {
return nil, err
}
- defer c.discardImplicitTxn(ctx, txn)
+ defer txn.Discard(ctx)
- err = c.loadIndexes(ctx, txn)
+ err = c.loadIndexes(ctx)
if err != nil {
return nil, err
}
@@ -496,9 +520,11 @@ func (c *collection) checkExistingFields(
func (c *collection) generateIndexNameIfNeededAndCreateKey(
ctx context.Context,
- txn datastore.Txn,
desc *client.IndexDescription,
) (core.CollectionIndexKey, error) {
+ // callers of this function must set a context transaction
+ txn := mustGetContextTxn(ctx)
+
var indexKey core.CollectionIndexKey
if desc.Name == "" {
nameIncrement := 1
diff --git a/db/collection_update.go b/db/collection_update.go
index fc985d2c41..e59469715a 100644
--- a/db/collection_update.go
+++ b/db/collection_update.go
@@ -20,33 +20,10 @@ import (
"github.com/sourcenetwork/defradb/client"
"github.com/sourcenetwork/defradb/client/request"
- "github.com/sourcenetwork/defradb/datastore"
"github.com/sourcenetwork/defradb/errors"
"github.com/sourcenetwork/defradb/planner"
)
-// UpdateWith updates a target document using the given updater type. Target
-// can be a Filter statement, a single DocID, a single document,
-// an array of DocIDs, or an array of documents.
-// If you want more type safety, use the respective typed versions of Update.
-// Eg: UpdateWithFilter or UpdateWithDocID
-func (c *collection) UpdateWith(
- ctx context.Context,
- target any,
- updater string,
-) (*client.UpdateResult, error) {
- switch t := target.(type) {
- case string, map[string]any, *request.Filter:
- return c.UpdateWithFilter(ctx, t, updater)
- case client.DocID:
- return c.UpdateWithDocID(ctx, t, updater)
- case []client.DocID:
- return c.UpdateWithDocIDs(ctx, t, updater)
- default:
- return nil, client.ErrInvalidUpdateTarget
- }
-}
-
// UpdateWithFilter updates using a filter to target documents for update.
// An updater value is provided, which could be a string Patch, string Merge Patch
// or a parsed Patch, or parsed Merge Patch.
@@ -55,154 +32,21 @@ func (c *collection) UpdateWithFilter(
filter any,
updater string,
) (*client.UpdateResult, error) {
- txn, err := c.getTxn(ctx, false)
- if err != nil {
- return nil, err
- }
- defer c.discardImplicitTxn(ctx, txn)
- res, err := c.updateWithFilter(ctx, txn, filter, updater)
- if err != nil {
- return nil, err
- }
- return res, c.commitImplicitTxn(ctx, txn)
-}
-
-// UpdateWithDocID updates using a DocID to target a single document for update.
-// An updater value is provided, which could be a string Patch, string Merge Patch
-// or a parsed Patch, or parsed Merge Patch.
-func (c *collection) UpdateWithDocID(
- ctx context.Context,
- docID client.DocID,
- updater string,
-) (*client.UpdateResult, error) {
- txn, err := c.getTxn(ctx, false)
- if err != nil {
- return nil, err
- }
- defer c.discardImplicitTxn(ctx, txn)
- res, err := c.updateWithDocID(ctx, txn, docID, updater)
- if err != nil {
- return nil, err
- }
-
- return res, c.commitImplicitTxn(ctx, txn)
-}
-
-// UpdateWithDocIDs is the same as UpdateWithDocID but accepts multiple DocIDs as a slice.
-// An updater value is provided, which could be a string Patch, string Merge Patch
-// or a parsed Patch, or parsed Merge Patch.
-func (c *collection) UpdateWithDocIDs(
- ctx context.Context,
- docIDs []client.DocID,
- updater string,
-) (*client.UpdateResult, error) {
- txn, err := c.getTxn(ctx, false)
- if err != nil {
- return nil, err
- }
- defer c.discardImplicitTxn(ctx, txn)
- res, err := c.updateWithIDs(ctx, txn, docIDs, updater)
- if err != nil {
- return nil, err
- }
-
- return res, c.commitImplicitTxn(ctx, txn)
-}
-
-func (c *collection) updateWithDocID(
- ctx context.Context,
- txn datastore.Txn,
- docID client.DocID,
- updater string,
-) (*client.UpdateResult, error) {
- parsedUpdater, err := fastjson.Parse(updater)
- if err != nil {
- return nil, err
- }
-
- isPatch := false
- if parsedUpdater.Type() == fastjson.TypeArray {
- isPatch = true
- } else if parsedUpdater.Type() != fastjson.TypeObject {
- return nil, client.ErrInvalidUpdater
- }
-
- doc, err := c.Get(ctx, docID, false)
- if err != nil {
- return nil, err
- }
-
- if isPatch {
- // todo
- } else {
- err = doc.SetWithJSON([]byte(updater))
- }
+ ctx, txn, err := ensureContextTxn(ctx, c.db, false)
if err != nil {
return nil, err
}
+ defer txn.Discard(ctx)
- _, err = c.save(ctx, txn, doc, false)
+ res, err := c.updateWithFilter(ctx, filter, updater)
if err != nil {
return nil, err
}
-
- results := &client.UpdateResult{
- Count: 1,
- DocIDs: []string{docID.String()},
- }
- return results, nil
-}
-
-func (c *collection) updateWithIDs(
- ctx context.Context,
- txn datastore.Txn,
- docIDs []client.DocID,
- updater string,
-) (*client.UpdateResult, error) {
- parsedUpdater, err := fastjson.Parse(updater)
- if err != nil {
- return nil, err
- }
-
- isPatch := false
- if parsedUpdater.Type() == fastjson.TypeArray {
- isPatch = true
- } else if parsedUpdater.Type() != fastjson.TypeObject {
- return nil, client.ErrInvalidUpdater
- }
-
- results := &client.UpdateResult{
- DocIDs: make([]string, len(docIDs)),
- }
- for i, docIDs := range docIDs {
- doc, err := c.Get(ctx, docIDs, false)
- if err != nil {
- return nil, err
- }
-
- if isPatch {
- // todo
- } else {
- err = doc.SetWithJSON([]byte(updater))
- }
- if err != nil {
- return nil, err
- }
-
- _, err = c.save(ctx, txn, doc, false)
- if err != nil {
- return nil, err
- }
-
- results.DocIDs[i] = docIDs.String()
- results.Count++
- }
- return results, nil
+ return res, txn.Commit(ctx)
}
func (c *collection) updateWithFilter(
ctx context.Context,
- txn datastore.Txn,
filter any,
updater string,
) (*client.UpdateResult, error) {
@@ -223,7 +67,7 @@ func (c *collection) updateWithFilter(
}
// Make a selection plan that will scan through only the documents with matching filter.
- selectionPlan, err := c.makeSelectionPlan(ctx, txn, filter)
+ selectionPlan, err := c.makeSelectionPlan(ctx, filter)
if err != nil {
return nil, err
}
@@ -240,7 +84,7 @@ func (c *collection) updateWithFilter(
// If the plan isn't properly closed at any exit point log the error.
defer func() {
if err := selectionPlan.Close(); err != nil {
- log.ErrorE(ctx, "Failed to close the selection plan, after filter update", err)
+ log.ErrorContextE(ctx, "Failed to close the selection plan, after filter update", err)
}
}()
@@ -263,7 +107,7 @@ func (c *collection) updateWithFilter(
// Get the document, and apply the patch
docAsMap := docMap.ToMap(selectionPlan.Value())
- doc, err := client.NewDocFromMap(docAsMap, c.Schema())
+ doc, err := client.NewDocFromMap(docAsMap, c.Definition())
if err != nil {
return nil, err
}
@@ -277,7 +121,7 @@ func (c *collection) updateWithFilter(
}
}
- _, err = c.save(ctx, txn, doc, false)
+ err = c.update(ctx, doc)
if err != nil {
return nil, err
}
@@ -310,7 +154,6 @@ func (c *collection) isSecondaryIDField(fieldDesc client.FieldDefinition) (clien
// patched.
func (c *collection) patchPrimaryDoc(
ctx context.Context,
- txn datastore.Txn,
secondaryCollectionName string,
relationFieldDescription client.FieldDefinition,
docID string,
@@ -321,18 +164,15 @@ func (c *collection) patchPrimaryDoc(
return err
}
- primaryCol, err := c.db.getCollectionByName(ctx, txn, relationFieldDescription.Schema)
+ primaryCol, err := c.db.getCollectionByName(ctx, relationFieldDescription.Kind.Underlying())
if err != nil {
return err
}
- primaryCol = primaryCol.WithTxn(txn)
- primarySchema := primaryCol.Schema()
primaryField, ok := primaryCol.Description().GetFieldByRelation(
relationFieldDescription.RelationName,
secondaryCollectionName,
relationFieldDescription.Name,
- &primarySchema,
)
if !ok {
return client.NewErrFieldNotExist(relationFieldDescription.RelationName)
@@ -348,6 +188,7 @@ func (c *collection) patchPrimaryDoc(
primaryDocID,
false,
)
+
if err != nil && !errors.Is(err, ds.ErrNotFound) {
return err
}
@@ -357,8 +198,13 @@ func (c *collection) patchPrimaryDoc(
return nil
}
- pc := c.db.newCollection(primaryCol.Description(), primarySchema)
- err = pc.validateOneToOneLinkDoesntAlreadyExist(ctx, txn, primaryDocID.String(), primaryIDField, docID)
+ pc := c.db.newCollection(primaryCol.Description(), primaryCol.Schema())
+ err = pc.validateOneToOneLinkDoesntAlreadyExist(
+ ctx,
+ primaryDocID.String(),
+ primaryIDField,
+ docID,
+ )
if err != nil {
return err
}
@@ -391,7 +237,6 @@ func (c *collection) patchPrimaryDoc(
// Additionally it only requests for the root scalar fields of the object
func (c *collection) makeSelectionPlan(
ctx context.Context,
- txn datastore.Txn,
filter any,
) (planner.RequestPlan, error) {
var f immutable.Option[request.Filter]
@@ -417,7 +262,16 @@ func (c *collection) makeSelectionPlan(
return nil, err
}
- planner := planner.New(ctx, c.db.WithTxn(txn), txn)
+ txn := mustGetContextTxn(ctx)
+ identity := GetContextIdentity(ctx)
+ planner := planner.New(
+ ctx,
+ identity,
+ c.db.acp,
+ c.db,
+ txn,
+ )
+
return planner.MakePlan(&request.Request{
Queries: []*request.OperationDefinition{
{
@@ -434,8 +288,12 @@ func (c *collection) makeSelectLocal(filter immutable.Option[request.Filter]) (*
Field: request.Field{
Name: c.Name().Value(),
},
- Filter: filter,
- Fields: make([]request.Selection, 0),
+ Filterable: request.Filterable{
+ Filter: filter,
+ },
+ ChildSelect: request.ChildSelect{
+ Fields: make([]request.Selection, 0),
+ },
}
for _, fd := range c.Schema().Fields {
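The refactor above replaces the old `getTxn`/`commitImplicitTxn` pairing with context-scoped transactions: each public method obtains (or creates) a transaction via `ensureContextTxn`, defers `Discard`, and commits on success. A minimal sketch of that shared shape, with `publicOp` and `doWork` as hypothetical names:

```go
// A minimal sketch of the transaction pattern shared by the refactored
// public methods; publicOp and doWork are hypothetical names.
func (c *collection) publicOp(ctx context.Context) error {
	ctx, txn, err := ensureContextTxn(ctx, c.db, false)
	if err != nil {
		return err
	}
	// For an implicit transaction this rolls back on early return;
	// for an explicit (caller-managed) one, Discard is a no-op.
	defer txn.Discard(ctx)

	// doWork reads the transaction back out of ctx via mustGetContextTxn.
	if err := c.doWork(ctx); err != nil {
		return err
	}
	// Commit is likewise a no-op when the caller owns the transaction.
	return txn.Commit(ctx)
}
```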
diff --git a/db/config.go b/db/config.go
new file mode 100644
index 0000000000..397956ed8b
--- /dev/null
+++ b/db/config.go
@@ -0,0 +1,73 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package db
+
+import (
+ "context"
+
+ "github.com/lens-vm/lens/host-go/engine/module"
+ "github.com/sourcenetwork/immutable"
+
+ "github.com/sourcenetwork/defradb/acp"
+ "github.com/sourcenetwork/defradb/events"
+)
+
+const (
+ defaultMaxTxnRetries = 5
+ updateEventBufferSize = 100
+)
+
+// Option is a function that sets a config value on the db.
+type Option func(*db)
+
+// WithACP enables access control. If path is empty then acp runs in-memory.
+func WithACP(path string) Option {
+ return func(db *db) {
+ var acpLocal acp.ACPLocal
+ acpLocal.Init(context.Background(), path)
+ db.acp = immutable.Some[acp.ACP](&acpLocal)
+ }
+}
+
+// WithACPInMemory enables access control in-memory.
+func WithACPInMemory() Option { return WithACP("") }
+
+// WithUpdateEvents enables the update events channel.
+func WithUpdateEvents() Option {
+ return func(db *db) {
+ db.events = events.Events{
+ Updates: immutable.Some(events.New[events.Update](0, updateEventBufferSize)),
+ }
+ }
+}
+
+// WithMaxRetries sets the maximum number of retries per transaction.
+func WithMaxRetries(num int) Option {
+ return func(db *db) {
+ db.maxTxnRetries = immutable.Some(num)
+ }
+}
+
+// WithLensPoolSize sets the maximum number of cached migration instances to preserve per schema version.
+//
+// Will default to `5` if not set.
+func WithLensPoolSize(size int) Option {
+ return func(db *db) {
+ db.lensPoolSize = immutable.Some(size)
+ }
+}
+
+// WithLensRuntime returns an option that sets the lens registry runtime.
+func WithLensRuntime(runtime module.Runtime) Option {
+ return func(db *db) {
+ db.lensRuntime = immutable.Some(runtime)
+ }
+}
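
config.go gathers the functional options into one place. A brief usage sketch, with `newConfiguredDB` as a hypothetical caller and the rootstore supplied by the caller (for example the in-memory Badger store used in db_test.go):

```go
package db

import (
	"context"

	"github.com/sourcenetwork/defradb/client"
	"github.com/sourcenetwork/defradb/datastore"
)

// newConfiguredDB is a hypothetical caller combining the options above;
// any datastore.RootStore works, e.g. the Badger store from db_test.go.
func newConfiguredDB(ctx context.Context, rootstore datastore.RootStore) (client.DB, error) {
	return NewDB(ctx, rootstore,
		WithACPInMemory(),  // ACP state held fully in memory
		WithUpdateEvents(), // enable the update events channel
		WithMaxRetries(3),  // per-transaction retry limit
	)
}
```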
diff --git a/db/config_test.go b/db/config_test.go
new file mode 100644
index 0000000000..02bd81a910
--- /dev/null
+++ b/db/config_test.go
@@ -0,0 +1,55 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package db
+
+import (
+ "testing"
+
+ "github.com/lens-vm/lens/host-go/runtimes/wasmtime"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestWithACP(t *testing.T) {
+ d := &db{}
+ WithACP("test")(d)
+ assert.True(t, d.acp.HasValue())
+}
+
+func TestWithACPInMemory(t *testing.T) {
+ d := &db{}
+ WithACPInMemory()(d)
+ assert.True(t, d.acp.HasValue())
+}
+
+func TestWithUpdateEvents(t *testing.T) {
+ d := &db{}
+ WithUpdateEvents()(d)
+ assert.NotNil(t, d.events)
+}
+
+func TestWithMaxRetries(t *testing.T) {
+ d := &db{}
+ WithMaxRetries(10)(d)
+ assert.True(t, d.maxTxnRetries.HasValue())
+ assert.Equal(t, 10, d.maxTxnRetries.Value())
+}
+
+func TestWithLensPoolSize(t *testing.T) {
+ d := &db{}
+ WithLensPoolSize(10)(d)
+ assert.Equal(t, 10, d.lensPoolSize.Value())
+}
+
+func TestWithLensRuntime(t *testing.T) {
+ d := &db{}
+ WithLensRuntime(wasmtime.New())(d)
+ assert.NotNil(t, d.lensRuntime.Value())
+}
diff --git a/db/context.go b/db/context.go
new file mode 100644
index 0000000000..88019af323
--- /dev/null
+++ b/db/context.go
@@ -0,0 +1,112 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package db
+
+import (
+ "context"
+
+ "github.com/sourcenetwork/immutable"
+
+ acpIdentity "github.com/sourcenetwork/defradb/acp/identity"
+ "github.com/sourcenetwork/defradb/datastore"
+)
+
+// txnContextKey is the key type for transaction context values.
+type txnContextKey struct{}
+
+// identityContextKey is the key type for ACP identity context values.
+type identityContextKey struct{}
+
+// explicitTxn is a transaction that is managed outside of a db operation.
+type explicitTxn struct {
+ datastore.Txn
+}
+
+func (t *explicitTxn) Commit(ctx context.Context) error {
+ return nil // do nothing
+}
+
+func (t *explicitTxn) Discard(ctx context.Context) {
+ // do nothing
+}
+
+// transactionDB is a db that can create transactions.
+type transactionDB interface {
+ NewTxn(context.Context, bool) (datastore.Txn, error)
+}
+
+// ensureContextTxn ensures that the returned context has a transaction
+// and an identity.
+//
+// If a transaction exists on the context it will be made explicit,
+// otherwise a new implicit transaction will be created.
+//
+// The returned context will contain the transaction and identity
+// along with the copied values from the input context.
+func ensureContextTxn(ctx context.Context, db transactionDB, readOnly bool) (context.Context, datastore.Txn, error) {
+ // explicit transaction
+ txn, ok := TryGetContextTxn(ctx)
+ if ok {
+ return SetContextTxn(ctx, &explicitTxn{txn}), &explicitTxn{txn}, nil
+ }
+ // implicit transaction
+ txn, err := db.NewTxn(ctx, readOnly)
+ if err != nil {
+ return nil, txn, err
+ }
+ return SetContextTxn(ctx, txn), txn, nil
+}
+
+// mustGetContextTxn returns the transaction from the context or panics.
+//
+// This should only be called from private functions within the db package
+// where we ensure an implicit or explicit transaction always exists.
+func mustGetContextTxn(ctx context.Context) datastore.Txn {
+ return ctx.Value(txnContextKey{}).(datastore.Txn)
+}
+
+// TryGetContextTxn returns a transaction and a bool indicating if the
+// txn was retrieved from the given context.
+func TryGetContextTxn(ctx context.Context) (datastore.Txn, bool) {
+ txn, ok := ctx.Value(txnContextKey{}).(datastore.Txn)
+ return txn, ok
+}
+
+// SetContextTxn returns a new context with the txn value set.
+//
+// This will overwrite any previously set transaction value.
+func SetContextTxn(ctx context.Context, txn datastore.Txn) context.Context {
+ return context.WithValue(ctx, txnContextKey{}, txn)
+}
+
+// GetContextIdentity returns the identity from the given context.
+//
+// If an identity does not exist, `acpIdentity.None` is returned.
+func GetContextIdentity(ctx context.Context) immutable.Option[acpIdentity.Identity] {
+ identity, ok := ctx.Value(identityContextKey{}).(acpIdentity.Identity)
+ if ok {
+ return immutable.Some(identity)
+ }
+ return acpIdentity.None
+}
+
+// SetContextIdentity returns a new context with the identity value set.
+//
+// This will overwrite any previously set identity value.
+func SetContextIdentity(ctx context.Context, identity immutable.Option[acpIdentity.Identity]) context.Context {
+ if identity.HasValue() {
+ return context.WithValue(ctx, identityContextKey{}, identity.Value())
+ }
+ return context.WithValue(ctx, identityContextKey{}, nil)
+}
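
Because `explicitTxn` turns `Commit` and `Discard` into no-ops, a caller can share one transaction across several store calls and keep sole control over when it commits. A sketch under those assumptions (`updateBothAtomically`, `col`, `filter` and `updater` are illustrative):

```go
// Sketch: two updates sharing one caller-managed transaction; col, filter
// and updater are assumed inputs.
func updateBothAtomically(
	ctx context.Context,
	d *db,
	col client.Collection,
	filter any,
	updater string,
) error {
	txn, err := d.NewTxn(ctx, false)
	if err != nil {
		return err
	}
	defer txn.Discard(ctx) // rolls both updates back unless we commit below

	// ensureContextTxn will wrap this txn in explicitTxn, so the inner
	// Commit/Discard calls inside UpdateWithFilter become no-ops.
	ctx = SetContextTxn(ctx, txn)
	if _, err := col.UpdateWithFilter(ctx, filter, updater); err != nil {
		return err
	}
	if _, err := col.UpdateWithFilter(ctx, filter, updater); err != nil {
		return err
	}
	// Only this commit persists the changes.
	return txn.Commit(ctx)
}
```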
diff --git a/db/context_test.go b/db/context_test.go
new file mode 100644
index 0000000000..c8b1a322e5
--- /dev/null
+++ b/db/context_test.go
@@ -0,0 +1,57 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package db
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestEnsureContextTxnExplicit(t *testing.T) {
+ ctx := context.Background()
+
+ db, err := newMemoryDB(ctx)
+ require.NoError(t, err)
+
+ txn, err := db.NewTxn(ctx, true)
+ require.NoError(t, err)
+
+ // set an explicit transaction
+ ctx = SetContextTxn(ctx, txn)
+
+ ctx, txn, err = ensureContextTxn(ctx, db, true)
+ require.NoError(t, err)
+
+ _, ok := txn.(*explicitTxn)
+ assert.True(t, ok)
+
+ _, ok = ctx.Value(txnContextKey{}).(*explicitTxn)
+ assert.True(t, ok)
+}
+
+func TestEnsureContextTxnImplicit(t *testing.T) {
+ ctx := context.Background()
+
+ db, err := newMemoryDB(ctx)
+ require.NoError(t, err)
+
+ ctx, txn, err := ensureContextTxn(ctx, db, true)
+ require.NoError(t, err)
+
+ _, ok := txn.(*explicitTxn)
+ assert.False(t, ok)
+
+ _, ok = ctx.Value(txnContextKey{}).(*explicitTxn)
+ assert.False(t, ok)
+}
diff --git a/db/db.go b/db/db.go
index 7b3ff7bcb8..613eea0b23 100644
--- a/db/db.go
+++ b/db/db.go
@@ -22,20 +22,22 @@ import (
blockstore "github.com/ipfs/boxo/blockstore"
ds "github.com/ipfs/go-datastore"
dsq "github.com/ipfs/go-datastore/query"
+ "github.com/lens-vm/lens/host-go/engine/module"
+ "github.com/sourcenetwork/corelog"
"github.com/sourcenetwork/immutable"
+ "github.com/sourcenetwork/defradb/acp"
"github.com/sourcenetwork/defradb/client"
"github.com/sourcenetwork/defradb/core"
"github.com/sourcenetwork/defradb/datastore"
"github.com/sourcenetwork/defradb/errors"
"github.com/sourcenetwork/defradb/events"
"github.com/sourcenetwork/defradb/lens"
- "github.com/sourcenetwork/defradb/logging"
"github.com/sourcenetwork/defradb/request/graphql"
)
var (
- log = logging.MustNewLogger("db")
+ log = corelog.NewLogger("db")
)
// make sure we match our client interface
@@ -43,10 +45,6 @@ var (
_ client.Collection = (*collection)(nil)
)
-const (
- defaultMaxTxnRetries = 5
-)
-
// DB is the main interface for interacting with the
// DefraDB storage system.
type db struct {
@@ -57,59 +55,41 @@ type db struct {
events events.Events
- parser core.Parser
+ parser core.Parser
+
+ // The maximum number of cached migration instances to preserve per schema version.
+ lensPoolSize immutable.Option[int]
+ lensRuntime immutable.Option[module.Runtime]
+
lensRegistry client.LensRegistry
// The maximum number of retries per transaction.
maxTxnRetries immutable.Option[int]
- // The maximum number of cached migrations instances to preserve per schema version.
- lensPoolSize immutable.Option[int]
-
// The options used to init the database
- options any
+ options []Option
// The ID of the last transaction created.
previousTxnID atomic.Uint64
-}
-
-// Functional option type.
-type Option func(*db)
-const updateEventBufferSize = 100
-
-// WithUpdateEvents enables the update events channel.
-func WithUpdateEvents() Option {
- return func(db *db) {
- db.events = events.Events{
- Updates: immutable.Some(events.New[events.Update](0, updateEventBufferSize)),
- }
- }
-}
-
-// WithMaxRetries sets the maximum number of retries per transaction.
-func WithMaxRetries(num int) Option {
- return func(db *db) {
- db.maxTxnRetries = immutable.Some(num)
- }
-}
-
-// WithLensPoolSize sets the maximum number of cached migrations instances to preserve per schema version.
-//
-// Will default to `5` if not set.
-func WithLensPoolSize(num int) Option {
- return func(db *db) {
- db.lensPoolSize = immutable.Some(num)
- }
+ // Contains ACP if it exists
+ acp immutable.Option[acp.ACP]
}
// NewDB creates a new instance of the DB using the given options.
-func NewDB(ctx context.Context, rootstore datastore.RootStore, options ...Option) (client.DB, error) {
+func NewDB(
+ ctx context.Context,
+ rootstore datastore.RootStore,
+ options ...Option,
+) (client.DB, error) {
return newDB(ctx, rootstore, options...)
}
-func newDB(ctx context.Context, rootstore datastore.RootStore, options ...Option) (*implicitTxnDB, error) {
- log.Debug(ctx, "Loading: internal datastores")
+func newDB(
+ ctx context.Context,
+ rootstore datastore.RootStore,
+ options ...Option,
+) (*db, error) {
multistore := datastore.MultiStoreFrom(rootstore)
parser, err := graphql.NewParser()
@@ -120,29 +100,26 @@ func newDB(ctx context.Context, rootstore datastore.RootStore, options ...Option
db := &db{
rootstore: rootstore,
multistore: multistore,
-
- parser: parser,
- options: options,
+ acp: acp.NoACP,
+ parser: parser,
+ options: options,
}
// apply options
for _, opt := range options {
- if opt == nil {
- continue
- }
opt(db)
}
- // lensPoolSize may be set by `options`, and because they are funcs on db
+ // lens options may be set by the `WithLens*` funcs, and because they are funcs on db
// we have to mutate `db` here to set the registry.
- db.lensRegistry = lens.NewRegistry(db.lensPoolSize, db)
+ db.lensRegistry = lens.NewRegistry(db, db.lensPoolSize, db.lensRuntime)
err = db.initialize(ctx)
if err != nil {
return nil, err
}
- return &implicitTxnDB{db}, nil
+ return db, nil
}
// NewTxn creates a new transaction.
@@ -157,15 +134,6 @@ func (db *db) NewConcurrentTxn(ctx context.Context, readonly bool) (datastore.Tx
return datastore.NewConcurrentTxnFrom(ctx, db.rootstore, txnId, readonly)
}
-// WithTxn returns a new [client.Store] that respects the given transaction.
-func (db *db) WithTxn(txn datastore.Txn) client.Store {
- return &explicitTxnDB{
- db: db,
- txn: txn,
- lensRegistry: db.lensRegistry.WithTxn(txn),
- }
-}
-
// Root returns the root datastore.
func (db *db) Root() datastore.RootStore {
return db.rootstore
@@ -185,19 +153,47 @@ func (db *db) LensRegistry() client.LensRegistry {
return db.lensRegistry
}
+func (db *db) AddPolicy(
+ ctx context.Context,
+ policy string,
+) (client.AddPolicyResult, error) {
+ if !db.acp.HasValue() {
+ return client.AddPolicyResult{}, client.ErrPolicyAddFailureNoACP
+ }
+ identity := GetContextIdentity(ctx)
+ policyID, err := db.acp.Value().AddPolicy(
+ ctx,
+ identity.Value().String(),
+ policy,
+ )
+
+ if err != nil {
+ return client.AddPolicyResult{}, err
+ }
+
+ return client.AddPolicyResult{PolicyID: policyID}, nil
+}
+
// Initialize is called when a database is first run and creates all the db global meta data
// like Collection ID counters.
func (db *db) initialize(ctx context.Context) error {
db.glock.Lock()
defer db.glock.Unlock()
- txn, err := db.NewTxn(ctx, false)
+ ctx, txn, err := ensureContextTxn(ctx, db, false)
if err != nil {
return err
}
defer txn.Discard(ctx)
- log.Debug(ctx, "Checking if DB has already been initialized...")
+ // Start acp if enabled; this will recover previous state if there is any.
+ if db.acp.HasValue() {
+ // db is responsible for calling db.acp.Close() to free acp resources when closing.
+ if err = db.acp.Value().Start(ctx); err != nil {
+ return err
+ }
+ }
+
exists, err := txn.Systemstore().Has(ctx, ds.NewKey("init"))
if err != nil && !errors.Is(err, ds.ErrNotFound) {
return err
@@ -205,8 +201,7 @@ func (db *db) initialize(ctx context.Context) error {
// if we're loading an existing database, just load the schema
// and migrations and finish initialization
if exists {
- log.Debug(ctx, "DB has already been initialized, continuing")
- err = db.loadSchema(ctx, txn)
+ err = db.loadSchema(ctx)
if err != nil {
return err
}
@@ -222,11 +217,9 @@ func (db *db) initialize(ctx context.Context) error {
return txn.Commit(ctx)
}
- log.Debug(ctx, "Opened a new DB, needs full initialization")
-
// init meta data
// collection sequence
- _, err = db.getSequence(ctx, txn, core.CollectionIDSequenceKey{})
+ _, err = db.getSequence(ctx, core.CollectionIDSequenceKey{})
if err != nil {
return err
}
@@ -261,16 +254,23 @@ func (db *db) PrintDump(ctx context.Context) error {
// Close is called when we are shutting down the database.
// This is the place for any last minute cleanup or releasing of resources (i.e.: Badger instance).
func (db *db) Close() {
- log.Info(context.Background(), "Closing DefraDB process...")
+ log.Info("Closing DefraDB process...")
if db.events.Updates.HasValue() {
db.events.Updates.Value().Close()
}
err := db.rootstore.Close()
if err != nil {
- log.ErrorE(context.Background(), "Failure closing running process", err)
+ log.ErrorE("Failure closing running process", err)
}
- log.Info(context.Background(), "Successfully closed running process")
+
+ if db.acp.HasValue() {
+ if err := db.acp.Value().Close(); err != nil {
+ log.ErrorE("Failure closing acp", err)
+ }
+ }
+
+ log.Info("Successfully closed running process")
}
func printStore(ctx context.Context, store datastore.DSReaderWriter) error {
@@ -286,7 +286,7 @@ func printStore(ctx context.Context, store datastore.DSReaderWriter) error {
}
for r := range results.Next() {
- log.Info(ctx, "", logging.NewKV(r.Key, r.Value))
+ log.InfoContext(ctx, "", corelog.Any(r.Key, r.Value))
}
return results.Close()
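`AddPolicy` requires ACP to be enabled and reads the identity from the context. A hypothetical call path, with the identity and policy document supplied by the caller:

```go
// Sketch: enabling ACP and adding a policy; identity and policy are
// placeholders supplied by the caller.
func addPolicyExample(
	ctx context.Context,
	d *db,
	identity acpIdentity.Identity,
	policy string,
) (string, error) {
	ctx = SetContextIdentity(ctx, immutable.Some(identity))
	res, err := d.AddPolicy(ctx, policy)
	if err != nil {
		// client.ErrPolicyAddFailureNoACP is returned when the db was
		// built without WithACP.
		return "", err
	}
	return res.PolicyID, nil
}
```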
diff --git a/db/db_test.go b/db/db_test.go
index 237a1f21ed..118adb285b 100644
--- a/db/db_test.go
+++ b/db/db_test.go
@@ -19,7 +19,7 @@ import (
badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4"
)
-func newMemoryDB(ctx context.Context) (*implicitTxnDB, error) {
+func newMemoryDB(ctx context.Context) (*db, error) {
opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)}
rootstore, err := badgerds.NewDatastore("", &opts)
if err != nil {
diff --git a/db/description/collection.go b/db/description/collection.go
index 8ffd473053..3658d3d318 100644
--- a/db/description/collection.go
+++ b/db/description/collection.go
@@ -13,8 +13,10 @@ package description
import (
"context"
"encoding/json"
+ "errors"
"sort"
+ ds "github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/query"
"github.com/sourcenetwork/defradb/client"
@@ -29,6 +31,11 @@ func SaveCollection(
txn datastore.Txn,
desc client.CollectionDescription,
) (client.CollectionDescription, error) {
+ existing, err := GetCollectionByID(ctx, txn, desc.ID)
+ if err != nil && !errors.Is(err, ds.ErrNotFound) {
+ return client.CollectionDescription{}, err
+ }
+
buf, err := json.Marshal(desc)
if err != nil {
return client.CollectionDescription{}, err
@@ -40,6 +47,35 @@ func SaveCollection(
return client.CollectionDescription{}, err
}
+ if existing.Name.HasValue() && existing.Name != desc.Name {
+ nameKey := core.NewCollectionNameKey(existing.Name.Value())
+ idBuf, err := txn.Systemstore().Get(ctx, nameKey.ToDS())
+ nameIndexExists := true
+ if err != nil {
+ if errors.Is(err, ds.ErrNotFound) {
+ nameIndexExists = false
+ } else {
+ return client.CollectionDescription{}, err
+ }
+ }
+ if nameIndexExists {
+ var keyID uint32
+ err = json.Unmarshal(idBuf, &keyID)
+ if err != nil {
+ return client.CollectionDescription{}, err
+ }
+
+ if keyID == desc.ID {
+ // The name index may have already been overwritten, pointing at another collection;
+ // we should only remove the existing index if it still points at this collection.
+ err := txn.Systemstore().Delete(ctx, nameKey.ToDS())
+ if err != nil {
+ return client.CollectionDescription{}, err
+ }
+ }
+ }
+ }
+
if desc.Name.HasValue() {
idBuf, err := json.Marshal(desc.ID)
if err != nil {
@@ -201,7 +237,7 @@ func GetCollections(
txn datastore.Txn,
) ([]client.CollectionDescription, error) {
q, err := txn.Systemstore().Query(ctx, query.Query{
- Prefix: core.COLLECTION,
+ Prefix: core.COLLECTION_ID,
})
if err != nil {
return nil, NewErrFailedToCreateCollectionQuery(err)
diff --git a/db/errors.go b/db/errors.go
index 34dd0d53b5..f917ee9724 100644
--- a/db/errors.go
+++ b/db/errors.go
@@ -16,95 +16,117 @@ import (
)
const (
- errFailedToGetHeads string = "failed to get document heads"
- errFailedToCreateCollectionQuery string = "failed to create collection prefix query"
- errFailedToGetCollection string = "failed to get collection"
- errFailedToGetAllCollections string = "failed to get all collections"
- errDocVerification string = "the document verification failed"
- errAddingP2PCollection string = "cannot add collection ID"
- errRemovingP2PCollection string = "cannot remove collection ID"
- errAddCollectionWithPatch string = "unknown collection, adding collections via patch is not supported"
- errCollectionIDDoesntMatch string = "CollectionID does not match existing"
- errSchemaRootDoesntMatch string = "SchemaRoot does not match existing"
- errCannotModifySchemaName string = "modifying the schema name is not supported"
- errCannotSetVersionID string = "setting the VersionID is not supported. It is updated automatically"
- errRelationalFieldMissingSchema string = "a `Schema` [name] must be provided when adding a new relation field"
- errRelationalFieldInvalidRelationType string = "invalid RelationType"
- errRelationalFieldMissingIDField string = "missing id field for relation object field"
- errRelationalFieldMissingRelationName string = "missing relation name"
- errPrimarySideNotDefined string = "primary side of relation not defined"
- errPrimarySideOnMany string = "cannot set the many side of a relation as primary"
- errBothSidesPrimary string = "both sides of a relation cannot be primary"
- errRelatedFieldKindMismatch string = "invalid Kind of the related field"
- errRelatedFieldRelationTypeMismatch string = "invalid RelationType of the related field"
- errRelationalFieldIDInvalidType string = "relational id field of invalid kind"
- errDuplicateField string = "duplicate field"
- errCannotMutateField string = "mutating an existing field is not supported"
- errCannotMoveField string = "moving fields is not currently supported"
- errCannotDeleteField string = "deleting an existing field is not supported"
- errFieldKindNotFound string = "no type found for given name"
- errFieldKindDoesNotMatchFieldSchema string = "field Kind does not match field Schema"
- errSchemaNotFound string = "no schema found for given name"
- errDocumentAlreadyExists string = "a document with the given ID already exists"
- errDocumentDeleted string = "a document with the given ID has been deleted"
- errIndexMissingFields string = "index missing fields"
- errNonZeroIndexIDProvided string = "non-zero index ID provided"
- errIndexFieldMissingName string = "index field missing name"
- errIndexFieldMissingDirection string = "index field missing direction"
- errIndexWithNameAlreadyExists string = "index with name already exists"
- errInvalidStoredIndex string = "invalid stored index"
- errInvalidStoredIndexKey string = "invalid stored index key"
- errNonExistingFieldForIndex string = "creating an index on a non-existing property"
- errCollectionDoesntExisting string = "collection with given name doesn't exist"
- errFailedToStoreIndexedField string = "failed to store indexed field"
- errFailedToReadStoredIndexDesc string = "failed to read stored index description"
- errCanNotDeleteIndexedField string = "can not delete indexed field"
- errCanNotAddIndexWithPatch string = "adding indexes via patch is not supported"
- errCanNotDropIndexWithPatch string = "dropping indexes via patch is not supported"
- errCanNotChangeIndexWithPatch string = "changing indexes via patch is not supported"
- errIndexWithNameDoesNotExists string = "index with name doesn't exists"
- errCorruptedIndex string = "corrupted index. Please delete and recreate the index"
- errInvalidFieldValue string = "invalid field value"
- errUnsupportedIndexFieldType string = "unsupported index field type"
- errIndexDescriptionHasNoFields string = "index description has no fields"
- errFieldOrAliasToFieldNotExist string = "The given field or alias to field does not exist"
- errCreateFile string = "failed to create file"
- errRemoveFile string = "failed to remove file"
- errOpenFile string = "failed to open file"
- errCloseFile string = "failed to close file"
- errFailedtoCloseQueryReqAllIDs string = "failed to close query requesting all docIDs"
- errFailedToReadByte string = "failed to read byte"
- errFailedToWriteString string = "failed to write string"
- errJSONDecode string = "failed to decode JSON"
- errDocFromMap string = "failed to create a new doc from map"
- errDocCreate string = "failed to save a new doc to collection"
- errDocUpdate string = "failed to update doc to collection"
- errExpectedJSONObject string = "expected JSON object"
- errExpectedJSONArray string = "expected JSON array"
- errOneOneAlreadyLinked string = "target document is already linked to another document"
- errIndexDoesNotMatchName string = "the index used does not match the given name"
- errCanNotIndexNonUniqueFields string = "can not index a doc's field(s) that violates unique index"
- errInvalidViewQuery string = "the query provided is not valid as a View"
+ errFailedToGetHeads string = "failed to get document heads"
+ errFailedToCreateCollectionQuery string = "failed to create collection prefix query"
+ errFailedToGetCollection string = "failed to get collection"
+ errFailedToGetAllCollections string = "failed to get all collections"
+ errDocVerification string = "the document verification failed"
+ errAddingP2PCollection string = "cannot add collection ID"
+ errRemovingP2PCollection string = "cannot remove collection ID"
+ errAddCollectionWithPatch string = "adding collections via patch is not supported"
+ errCollectionIDDoesntMatch string = "CollectionID does not match existing"
+ errSchemaRootDoesntMatch string = "SchemaRoot does not match existing"
+ errCannotModifySchemaName string = "modifying the schema name is not supported"
+ errCannotSetVersionID string = "setting the VersionID is not supported"
+ errRelationalFieldInvalidRelationType string = "invalid RelationType"
+ errRelationalFieldMissingIDField string = "missing id field for relation object field"
+ errRelatedFieldKindMismatch string = "invalid Kind of the related field"
+ errRelatedFieldRelationTypeMismatch string = "invalid RelationType of the related field"
+ errRelationalFieldIDInvalidType string = "relational id field of invalid kind"
+ errDuplicateField string = "duplicate field"
+ errCannotMutateField string = "mutating an existing field is not supported"
+ errCannotMoveField string = "moving fields is not currently supported"
+ errCannotDeleteField string = "deleting an existing field is not supported"
+ errFieldKindNotFound string = "no type found for given name"
+ errFieldKindDoesNotMatchFieldSchema string = "field Kind does not match field Schema"
+ errDocumentAlreadyExists string = "a document with the given ID already exists"
+ errDocumentDeleted string = "a document with the given ID has been deleted"
+ errIndexMissingFields string = "index missing fields"
+ errNonZeroIndexIDProvided string = "non-zero index ID provided"
+ errIndexFieldMissingName string = "index field missing name"
+ errIndexFieldMissingDirection string = "index field missing direction"
+ errIndexWithNameAlreadyExists string = "index with name already exists"
+ errInvalidStoredIndex string = "invalid stored index"
+ errInvalidStoredIndexKey string = "invalid stored index key"
+ errNonExistingFieldForIndex string = "creating an index on a non-existing property"
+ errCollectionDoesntExisting string = "collection with given name doesn't exist"
+ errFailedToStoreIndexedField string = "failed to store indexed field"
+ errFailedToReadStoredIndexDesc string = "failed to read stored index description"
+ errCanNotDeleteIndexedField string = "can not delete indexed field"
+ errCanNotAddIndexWithPatch string = "adding indexes via patch is not supported"
+ errCanNotDropIndexWithPatch string = "dropping indexes via patch is not supported"
+ errCanNotChangeIndexWithPatch string = "changing indexes via patch is not supported"
+ errIndexWithNameDoesNotExists string = "index with name doesn't exist"
+ errCorruptedIndex string = "corrupted index. Please delete and recreate the index"
+ errInvalidFieldValue string = "invalid field value"
+ errUnsupportedIndexFieldType string = "unsupported index field type"
+ errIndexDescriptionHasNoFields string = "index description has no fields"
+ errFieldOrAliasToFieldNotExist string = "The given field or alias to field does not exist"
+ errCreateFile string = "failed to create file"
+ errRemoveFile string = "failed to remove file"
+ errOpenFile string = "failed to open file"
+ errCloseFile string = "failed to close file"
+ errFailedtoCloseQueryReqAllIDs string = "failed to close query requesting all docIDs"
+ errFailedToReadByte string = "failed to read byte"
+ errFailedToWriteString string = "failed to write string"
+ errJSONDecode string = "failed to decode JSON"
+ errDocFromMap string = "failed to create a new doc from map"
+ errDocCreate string = "failed to save a new doc to collection"
+ errDocUpdate string = "failed to update doc to collection"
+ errExpectedJSONObject string = "expected JSON object"
+ errExpectedJSONArray string = "expected JSON array"
+ errOneOneAlreadyLinked string = "target document is already linked to another document"
+ errIndexDoesNotMatchName string = "the index used does not match the given name"
+ errCanNotIndexNonUniqueFields string = "can not index a doc's field(s) that violates unique index"
+ errInvalidViewQuery string = "the query provided is not valid as a View"
+ errCollectionAlreadyExists string = "collection already exists"
+ errMultipleActiveCollectionVersions string = "multiple versions of same collection cannot be active"
+ errCollectionSourcesCannotBeAddedRemoved string = "collection sources cannot be added or removed"
+ errCollectionSourceIDMutated string = "collection source ID cannot be mutated"
+ errCollectionIndexesCannotBeMutated string = "collection indexes cannot be mutated"
+ errCollectionFieldsCannotBeMutated string = "collection fields cannot be mutated"
+ errCollectionPolicyCannotBeMutated string = "collection policy cannot be mutated"
+ errCollectionRootIDCannotBeMutated string = "collection root ID cannot be mutated"
+ errCollectionSchemaVersionIDCannotBeMutated string = "collection schema version ID cannot be mutated"
+ errCollectionIDCannotBeZero string = "collection ID cannot be zero"
+ errCollectionsCannotBeDeleted string = "collections cannot be deleted"
+ errCanNotHavePolicyWithoutACP string = "can not specify policy on collection without acp"
+ errSecondaryFieldOnSchema string = "secondary relation fields cannot be defined on the schema"
+ errRelationMissingField string = "relation missing field"
)
var (
- ErrFailedToGetCollection = errors.New(errFailedToGetCollection)
- ErrSubscriptionsNotAllowed = errors.New("server does not accept subscriptions")
- ErrInvalidFilter = errors.New("invalid filter")
- ErrCollectionAlreadyExists = errors.New("collection already exists")
- ErrCollectionNameEmpty = errors.New("collection name can't be empty")
- ErrSchemaNameEmpty = errors.New("schema name can't be empty")
- ErrSchemaRootEmpty = errors.New("schema root can't be empty")
- ErrSchemaVersionIDEmpty = errors.New("schema version ID can't be empty")
- ErrKeyEmpty = errors.New("key cannot be empty")
- ErrCannotSetVersionID = errors.New(errCannotSetVersionID)
- ErrIndexMissingFields = errors.New(errIndexMissingFields)
- ErrIndexFieldMissingName = errors.New(errIndexFieldMissingName)
- ErrCorruptedIndex = errors.New(errCorruptedIndex)
- ErrExpectedJSONObject = errors.New(errExpectedJSONObject)
- ErrExpectedJSONArray = errors.New(errExpectedJSONArray)
- ErrInvalidViewQuery = errors.New(errInvalidViewQuery)
- ErrCanNotIndexNonUniqueFields = errors.New(errCanNotIndexNonUniqueFields)
+ ErrFailedToGetCollection = errors.New(errFailedToGetCollection)
+ ErrCanNotCreateIndexOnCollectionWithPolicy = errors.New("can not create index on a collection with a policy")
+ ErrSubscriptionsNotAllowed = errors.New("server does not accept subscriptions")
+ ErrInvalidFilter = errors.New("invalid filter")
+ ErrCollectionAlreadyExists = errors.New(errCollectionAlreadyExists)
+ ErrCollectionNameEmpty = errors.New("collection name can't be empty")
+ ErrSchemaNameEmpty = errors.New("schema name can't be empty")
+ ErrSchemaRootEmpty = errors.New("schema root can't be empty")
+ ErrSchemaVersionIDEmpty = errors.New("schema version ID can't be empty")
+ ErrKeyEmpty = errors.New("key cannot be empty")
+ ErrCannotSetVersionID = errors.New(errCannotSetVersionID)
+ ErrIndexMissingFields = errors.New(errIndexMissingFields)
+ ErrIndexFieldMissingName = errors.New(errIndexFieldMissingName)
+ ErrCorruptedIndex = errors.New(errCorruptedIndex)
+ ErrExpectedJSONObject = errors.New(errExpectedJSONObject)
+ ErrExpectedJSONArray = errors.New(errExpectedJSONArray)
+ ErrInvalidViewQuery = errors.New(errInvalidViewQuery)
+ ErrCanNotIndexNonUniqueFields = errors.New(errCanNotIndexNonUniqueFields)
+ ErrMultipleActiveCollectionVersions = errors.New(errMultipleActiveCollectionVersions)
+ ErrCollectionSourcesCannotBeAddedRemoved = errors.New(errCollectionSourcesCannotBeAddedRemoved)
+ ErrCollectionSourceIDMutated = errors.New(errCollectionSourceIDMutated)
+ ErrCollectionIndexesCannotBeMutated = errors.New(errCollectionIndexesCannotBeMutated)
+ ErrCollectionFieldsCannotBeMutated = errors.New(errCollectionFieldsCannotBeMutated)
+ ErrCollectionRootIDCannotBeMutated = errors.New(errCollectionRootIDCannotBeMutated)
+ ErrCollectionSchemaVersionIDCannotBeMutated = errors.New(errCollectionSchemaVersionIDCannotBeMutated)
+ ErrCollectionIDCannotBeZero = errors.New(errCollectionIDCannotBeZero)
+ ErrCollectionsCannotBeDeleted = errors.New(errCollectionsCannotBeDeleted)
+ ErrCanNotHavePolicyWithoutACP = errors.New(errCanNotHavePolicyWithoutACP)
+ ErrSecondaryFieldOnSchema = errors.New(errSecondaryFieldOnSchema)
+ ErrRelationMissingField = errors.New(errRelationMissingField)
+ ErrMultipleRelationPrimaries = errors.New("relation can only have a single field set as primary")
)
// NewErrFailedToGetHeads returns a new error indicating that the heads of a document
@@ -208,6 +230,13 @@ func NewErrAddCollectionWithPatch(name string) error {
)
}
+func NewErrAddCollectionIDWithPatch(id uint32) error {
+ return errors.New(
+ errAddCollectionWithPatch,
+ errors.NewKV("ID", id),
+ )
+}
+
func NewErrCollectionIDDoesntMatch(name string, existingID, proposedID uint32) error {
return errors.New(
errCollectionIDDoesntMatch,
@@ -234,14 +263,6 @@ func NewErrCannotModifySchemaName(existingName, proposedName string) error {
)
}
-func NewErrRelationalFieldMissingSchema(name string, kind client.FieldKind) error {
- return errors.New(
- errRelationalFieldMissingSchema,
- errors.NewKV("Field", name),
- errors.NewKV("Kind", kind),
- )
-}
-
func NewErrRelationalFieldMissingIDField(name string, expectedName string) error {
return errors.New(
errRelationalFieldMissingIDField,
@@ -250,34 +271,6 @@ func NewErrRelationalFieldMissingIDField(name string, expectedName string) error
)
}
-func NewErrRelationalFieldMissingRelationName(name string) error {
- return errors.New(
- errRelationalFieldMissingRelationName,
- errors.NewKV("Field", name),
- )
-}
-
-func NewErrPrimarySideNotDefined(relationName string) error {
- return errors.New(
- errPrimarySideNotDefined,
- errors.NewKV("RelationName", relationName),
- )
-}
-
-func NewErrPrimarySideOnMany(name string) error {
- return errors.New(
- errPrimarySideOnMany,
- errors.NewKV("Field", name),
- )
-}
-
-func NewErrBothSidesPrimary(relationName string) error {
- return errors.New(
- errBothSidesPrimary,
- errors.NewKV("RelationName", relationName),
- )
-}
-
func NewErrRelatedFieldKindMismatch(relationName string, expected client.FieldKind, actual client.FieldKind) error {
return errors.New(
errRelatedFieldKindMismatch,
@@ -296,9 +289,10 @@ func NewErrRelationalFieldIDInvalidType(name string, expected, actual client.Fie
)
}
-func NewErrFieldKindNotFound(kind string) error {
+func NewErrFieldKindNotFound(name string, kind string) error {
return errors.New(
errFieldKindNotFound,
+ errors.NewKV("Field", name),
errors.NewKV("Kind", kind),
)
}
@@ -311,14 +305,6 @@ func NewErrFieldKindDoesNotMatchFieldSchema(kind string, schema string) error {
)
}
-func NewErrSchemaNotFound(name string, schema string) error {
- return errors.New(
- errSchemaNotFound,
- errors.NewKV("Field", name),
- errors.NewKV("Schema", schema),
- )
-}
-
func NewErrDuplicateField(name string) error {
return errors.New(errDuplicateField, errors.NewKV("Name", name))
}
@@ -543,3 +529,98 @@ func NewErrInvalidViewQueryMissingQuery() error {
errors.NewKV("Reason", "No query provided"),
)
}
+
+func NewErrCollectionAlreadyExists(name string) error {
+ return errors.New(
+ errCollectionAlreadyExists,
+ errors.NewKV("Name", name),
+ )
+}
+
+func NewErrCollectionIDAlreadyExists(id uint32) error {
+ return errors.New(
+ errCollectionAlreadyExists,
+ errors.NewKV("ID", id),
+ )
+}
+
+func NewErrMultipleActiveCollectionVersions(name string, root uint32) error {
+ return errors.New(
+ errMultipleActiveCollectionVersions,
+ errors.NewKV("Name", name),
+ errors.NewKV("Root", root),
+ )
+}
+
+func NewErrCollectionSourcesCannotBeAddedRemoved(colID uint32) error {
+ return errors.New(
+ errCollectionSourcesCannotBeAddedRemoved,
+ errors.NewKV("CollectionID", colID),
+ )
+}
+
+func NewErrCollectionSourceIDMutated(colID uint32, newSrcID uint32, oldSrcID uint32) error {
+ return errors.New(
+ errCollectionSourceIDMutated,
+ errors.NewKV("CollectionID", colID),
+ errors.NewKV("NewCollectionSourceID", newSrcID),
+ errors.NewKV("OldCollectionSourceID", oldSrcID),
+ )
+}
+
+func NewErrCollectionIndexesCannotBeMutated(colID uint32) error {
+ return errors.New(
+ errCollectionIndexesCannotBeMutated,
+ errors.NewKV("CollectionID", colID),
+ )
+}
+
+func NewErrCollectionFieldsCannotBeMutated(colID uint32) error {
+ return errors.New(
+ errCollectionFieldsCannotBeMutated,
+ errors.NewKV("CollectionID", colID),
+ )
+}
+
+func NewErrCollectionPolicyCannotBeMutated(colID uint32) error {
+ return errors.New(
+ errCollectionPolicyCannotBeMutated,
+ errors.NewKV("CollectionID", colID),
+ )
+}
+
+func NewErrCollectionRootIDCannotBeMutated(colID uint32) error {
+ return errors.New(
+ errCollectionRootIDCannotBeMutated,
+ errors.NewKV("CollectionID", colID),
+ )
+}
+
+func NewErrCollectionSchemaVersionIDCannotBeMutated(colID uint32) error {
+ return errors.New(
+ errCollectionSchemaVersionIDCannotBeMutated,
+ errors.NewKV("CollectionID", colID),
+ )
+}
+
+func NewErrCollectionsCannotBeDeleted(colID uint32) error {
+ return errors.New(
+ errCollectionsCannotBeDeleted,
+ errors.NewKV("CollectionID", colID),
+ )
+}
+
+func NewErrSecondaryFieldOnSchema(name string) error {
+ return errors.New(
+ errSecondaryFieldOnSchema,
+ errors.NewKV("Name", name),
+ )
+}
+
+func NewErrRelationMissingField(objectName, relationName string) error {
+ return errors.New(
+ errRelationMissingField,
+ errors.NewKV("Object", objectName),
+ errors.NewKV("RelationName", relationName),
+ )
+}
diff --git a/db/fetcher/encoded_doc.go b/db/fetcher/encoded_doc.go
index 889aea848a..cb4345abe1 100644
--- a/db/fetcher/encoded_doc.go
+++ b/db/fetcher/encoded_doc.go
@@ -106,13 +106,13 @@ func (encdoc *encodedDocument) Reset() {
}
// Decode returns a properly decoded document object
-func Decode(encdoc EncodedDocument, sd client.SchemaDescription) (*client.Document, error) {
+func Decode(encdoc EncodedDocument, collectionDefinition client.CollectionDefinition) (*client.Document, error) {
docID, err := client.NewDocIDFromString(string(encdoc.ID()))
if err != nil {
return nil, err
}
- doc := client.NewDocWithID(docID, sd)
+ doc := client.NewDocWithID(docID, collectionDefinition)
properties, err := encdoc.Properties(false)
if err != nil {
return nil, err
diff --git a/db/fetcher/fetcher.go b/db/fetcher/fetcher.go
index e4bb08cee4..894361dea4 100644
--- a/db/fetcher/fetcher.go
+++ b/db/fetcher/fetcher.go
@@ -18,11 +18,16 @@ import (
"github.com/bits-and-blooms/bitset"
dsq "github.com/ipfs/go-datastore/query"
+ "github.com/sourcenetwork/immutable"
+
+ "github.com/sourcenetwork/defradb/acp"
+ acpIdentity "github.com/sourcenetwork/defradb/acp/identity"
"github.com/sourcenetwork/defradb/client"
"github.com/sourcenetwork/defradb/core"
"github.com/sourcenetwork/defradb/datastore"
"github.com/sourcenetwork/defradb/datastore/iterable"
"github.com/sourcenetwork/defradb/db/base"
+ "github.com/sourcenetwork/defradb/db/permission"
"github.com/sourcenetwork/defradb/planner/mapper"
"github.com/sourcenetwork/defradb/request/graphql/parser"
)
@@ -56,7 +61,9 @@ func (s *ExecInfo) Reset() {
type Fetcher interface {
Init(
ctx context.Context,
+ identity immutable.Option[acpIdentity.Identity],
txn datastore.Txn,
+ acp immutable.Option[acp.ACP],
col client.Collection,
fields []client.FieldDefinition,
filter *mapper.Filter,
@@ -81,6 +88,10 @@ var (
// DocumentFetcher is a utility to incrementally fetch all the documents.
type DocumentFetcher struct {
+ identity immutable.Option[acpIdentity.Identity]
+ acp immutable.Option[acp.ACP]
+ passedPermissionCheck bool // have valid permission to access
+
col client.Collection
reverse bool
deletedDocs bool
@@ -136,7 +147,9 @@ type DocumentFetcher struct {
// Init implements DocumentFetcher.
func (df *DocumentFetcher) Init(
ctx context.Context,
+ identity immutable.Option[acpIdentity.Identity],
txn datastore.Txn,
+ acp immutable.Option[acp.ACP],
col client.Collection,
fields []client.FieldDefinition,
filter *mapper.Filter,
@@ -146,7 +159,7 @@ func (df *DocumentFetcher) Init(
) error {
df.txn = txn
- err := df.init(col, fields, filter, docmapper, reverse)
+ err := df.init(identity, acp, col, fields, filter, docmapper, reverse)
if err != nil {
return err
}
@@ -156,19 +169,23 @@ func (df *DocumentFetcher) Init(
df.deletedDocFetcher = new(DocumentFetcher)
df.deletedDocFetcher.txn = txn
}
- return df.deletedDocFetcher.init(col, fields, filter, docmapper, reverse)
+ return df.deletedDocFetcher.init(identity, acp, col, fields, filter, docmapper, reverse)
}
return nil
}
func (df *DocumentFetcher) init(
+ identity immutable.Option[acpIdentity.Identity],
+ acp immutable.Option[acp.ACP],
col client.Collection,
fields []client.FieldDefinition,
filter *mapper.Filter,
docMapper *core.DocumentMapping,
reverse bool,
) error {
+ df.identity = identity
+ df.acp = acp
df.col = col
df.reverse = reverse
df.initialized = true
@@ -476,6 +493,7 @@ func (df *DocumentFetcher) processKV(kv *keyValue) error {
}
}
df.doc.id = []byte(kv.Key.DocID)
+ df.passedPermissionCheck = false
df.passedFilter = false
df.ranFilter = false
@@ -544,24 +562,26 @@ func (df *DocumentFetcher) FetchNext(ctx context.Context) (EncodedDocument, Exec
(df.reverse && ddf.kv.Key.DocID > df.kv.Key.DocID) ||
(!df.reverse && ddf.kv.Key.DocID < df.kv.Key.DocID) {
encdoc, execInfo, err := ddf.FetchNext(ctx)
+
if err != nil {
return nil, ExecInfo{}, err
}
- if encdoc != nil {
- return encdoc, execInfo, err
- }
resultExecInfo.Add(execInfo)
+ if encdoc != nil {
+ return encdoc, resultExecInfo, nil
+ }
}
}
}
encdoc, execInfo, err := df.fetchNext(ctx)
+
if err != nil {
return nil, ExecInfo{}, err
}
- resultExecInfo.Add(execInfo)
+ resultExecInfo.Add(execInfo)
return encdoc, resultExecInfo, err
}
@@ -573,9 +593,6 @@ func (df *DocumentFetcher) fetchNext(ctx context.Context) (EncodedDocument, Exec
if df.kv == nil {
return nil, ExecInfo{}, client.NewErrUninitializeProperty("DocumentFetcher", "kv")
}
- // save the DocID of the current kv pair so we can track when we cross the doc pair boundries
- // keyparts := df.kv.Key.List()
- // key := keyparts[len(keyparts)-2]
prevExecInfo := df.execInfo
defer func() { df.execInfo.Add(prevExecInfo) }()
@@ -584,8 +601,7 @@ func (df *DocumentFetcher) fetchNext(ctx context.Context) (EncodedDocument, Exec
// we'll know when we're done when either
// A) Reach the end of the iterator
for {
- err := df.processKV(df.kv)
- if err != nil {
+ if err := df.processKV(df.kv); err != nil {
return nil, ExecInfo{}, err
}
@@ -606,16 +622,45 @@ func (df *DocumentFetcher) fetchNext(ctx context.Context) (EncodedDocument, Exec
}
}
- // if we don't pass the filter (ran and pass)
- // theres no point in collecting other select fields
- // so we seek to the next doc
- spansDone, docDone, err := df.nextKey(ctx, !df.passedFilter && df.ranFilter)
+ // Check if we have read access to this document on this collection, with the given identity.
+ if !df.passedPermissionCheck {
+ if !df.acp.HasValue() {
+ // If no acp is available, then we have unrestricted access.
+ df.passedPermissionCheck = true
+ } else {
+ hasPermission, err := permission.CheckAccessOfDocOnCollectionWithACP(
+ ctx,
+ df.identity,
+ df.acp.Value(),
+ df.col,
+ acp.ReadPermission,
+ df.kv.Key.DocID,
+ )
+
+ if err != nil {
+ df.passedPermissionCheck = false
+ return nil, ExecInfo{}, err
+ }
+
+ df.passedPermissionCheck = hasPermission
+ }
+ }
+
+ // if we don't pass the filter (ran and passed) or don't have access to the document, then
+ // there is no point in collecting other select fields, so we seek to the next doc.
+ spansDone, docDone, err := df.nextKey(ctx, !df.passedPermissionCheck || !df.passedFilter && df.ranFilter)
+
if err != nil {
return nil, ExecInfo{}, err
}
- if docDone {
- df.execInfo.DocsFetched++
+ if !docDone {
+ continue
+ }
+
+ df.execInfo.DocsFetched++
+
+ if df.passedPermissionCheck {
if df.filter != nil {
// if we passed, return
if df.passedFilter {
@@ -636,21 +681,11 @@ func (df *DocumentFetcher) fetchNext(ctx context.Context) (EncodedDocument, Exec
} else {
return df.doc, df.execInfo, nil
}
+ }
- if !spansDone {
- continue
- }
-
+ if spansDone {
return nil, df.execInfo, nil
}
-
- // // crossed document kv boundary?
- // // if so, return document
- // newkeyparts := df.kv.Key.List()
- // newKey := newkeyparts[len(newkeyparts)-2]
- // if newKey != key {
- // return df.doc, nil
- // }
}
}
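
`fetchNext` now gates every document on an ACP read check before collecting its fields; without ACP, access is unrestricted. The gate isolated as a sketch (`canReadDoc` is an illustrative name; the ACP call matches the one above):

```go
// The per-document permission gate from fetchNext, isolated; canReadDoc
// is an illustrative name, the ACP call matches the one above.
func canReadDoc(
	ctx context.Context,
	identity immutable.Option[acpIdentity.Identity],
	maybeACP immutable.Option[acp.ACP],
	col client.Collection,
	docID string,
) (bool, error) {
	if !maybeACP.HasValue() {
		return true, nil // no ACP configured: unrestricted access
	}
	return permission.CheckAccessOfDocOnCollectionWithACP(
		ctx,
		identity,
		maybeACP.Value(),
		col,
		acp.ReadPermission,
		docID,
	)
}
```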
diff --git a/db/fetcher/indexer.go b/db/fetcher/indexer.go
index 158c7cb88d..2e776fd55b 100644
--- a/db/fetcher/indexer.go
+++ b/db/fetcher/indexer.go
@@ -13,6 +13,10 @@ package fetcher
import (
"context"
+ "github.com/sourcenetwork/immutable"
+
+ "github.com/sourcenetwork/defradb/acp"
+ acpIdentity "github.com/sourcenetwork/defradb/acp/identity"
"github.com/sourcenetwork/defradb/client"
"github.com/sourcenetwork/defradb/core"
"github.com/sourcenetwork/defradb/datastore"
@@ -54,7 +58,9 @@ func NewIndexFetcher(
func (f *IndexFetcher) Init(
ctx context.Context,
+ identity immutable.Option[acpIdentity.Identity],
txn datastore.Txn,
+ acp immutable.Option[acp.ACP],
col client.Collection,
fields []client.FieldDefinition,
filter *mapper.Filter,
@@ -93,7 +99,18 @@ outer:
f.indexIter = iter
if f.docFetcher != nil && len(f.docFields) > 0 {
- err = f.docFetcher.Init(ctx, f.txn, f.col, f.docFields, f.docFilter, f.mapping, false, false)
+ err = f.docFetcher.Init(
+ ctx,
+ identity,
+ f.txn,
+ acp,
+ f.col,
+ f.docFields,
+ f.docFilter,
+ f.mapping,
+ false,
+ false,
+ )
}
return err
@@ -128,7 +145,7 @@ func (f *IndexFetcher) FetchNext(ctx context.Context) (EncodedDocument, ExecInfo
property := &encProperty{Desc: indexedField}
field := res.key.Fields[i]
- if field.Value == nil {
+ if field.Value.IsNil() {
hasNilField = true
}
@@ -147,11 +164,14 @@ func (f *IndexFetcher) FetchNext(ctx context.Context) (EncodedDocument, ExecInfo
if f.indexDesc.Unique && !hasNilField {
f.doc.id = res.value
} else {
- docID, ok := res.key.Fields[len(res.key.Fields)-1].Value.(string)
- if !ok {
+ lastVal := res.key.Fields[len(res.key.Fields)-1].Value
+ if str, ok := lastVal.String(); ok {
+ f.doc.id = []byte(str)
+ } else if bytes, ok := lastVal.Bytes(); ok {
+ f.doc.id = bytes
+ } else {
return nil, ExecInfo{}, err
}
- f.doc.id = []byte(docID)
}
if f.docFetcher != nil && len(f.docFields) > 0 {
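The matchers in indexer_iterators.go below now receive `client.NormalValue` instead of `any`, probing the plain accessor first and the nillable accessor second, so nil values fail the match instead of raising a type error. The pattern for int64, as a sketch (`matchInt64` is an illustrative name; the float and string matchers are analogous):

```go
// The accessor pattern the matchers below now follow, shown for int64:
// try the plain accessor, then the nillable one, and only report a type
// mismatch when neither applies.
func matchInt64(v client.NormalValue, want int64, eval func(a, b int64) bool) (bool, error) {
	if i, ok := v.Int(); ok {
		return eval(i, want), nil
	}
	if oi, ok := v.NillableInt(); ok {
		if !oi.HasValue() {
			return false, nil // nil never satisfies a typed comparison
		}
		return eval(oi.Value(), want), nil
	}
	return false, NewErrUnexpectedTypeValue[int64](v)
}
```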
diff --git a/db/fetcher/indexer_iterators.go b/db/fetcher/indexer_iterators.go
index 482c15d31a..d1ca5841c3 100644
--- a/db/fetcher/indexer_iterators.go
+++ b/db/fetcher/indexer_iterators.go
@@ -161,7 +161,7 @@ func (i *eqSingleIndexIterator) Close() error {
type inIndexIterator struct {
indexIterator
- inValues []any
+ inValues []client.NormalValue
nextValIndex int
ctx context.Context
store datastore.DSReaderWriter
@@ -274,7 +274,7 @@ func (iter *scanningIndexIterator) Next() (indexIterResult, error) {
// checks if the value satisfies the condition
type valueMatcher interface {
- Match(any) (bool, error)
+ Match(client.NormalValue) (bool, error)
}
type intMatcher struct {
@@ -282,12 +282,17 @@ type intMatcher struct {
evalFunc func(int64, int64) bool
}
-func (m *intMatcher) Match(value any) (bool, error) {
- intVal, ok := value.(int64)
- if !ok {
- return false, NewErrUnexpectedTypeValue[int64](value)
+func (m *intMatcher) Match(value client.NormalValue) (bool, error) {
+ if intVal, ok := value.Int(); ok {
+ return m.evalFunc(intVal, m.value), nil
+ }
+ if intOptVal, ok := value.NillableInt(); ok {
+ if !intOptVal.HasValue() {
+ return false, nil
+ }
+ return m.evalFunc(intOptVal.Value(), m.value), nil
}
- return m.evalFunc(intVal, m.value), nil
+ return false, NewErrUnexpectedTypeValue[int64](value)
}
type floatMatcher struct {
@@ -295,12 +300,17 @@ type floatMatcher struct {
evalFunc func(float64, float64) bool
}
-func (m *floatMatcher) Match(value any) (bool, error) {
- floatVal, ok := value.(float64)
- if !ok {
- return false, NewErrUnexpectedTypeValue[float64](value)
+func (m *floatMatcher) Match(value client.NormalValue) (bool, error) {
+ if floatVal, ok := value.Float(); ok {
+ return m.evalFunc(floatVal, m.value), nil
+ }
+ if floatOptVal, ok := value.NillableFloat(); ok {
+ if !floatOptVal.HasValue() {
+ return false, nil
+ }
+ return m.evalFunc(floatOptVal.Value(), m.value), nil
}
- return m.evalFunc(m.value, floatVal), nil
+ return false, NewErrUnexpectedTypeValue[float64](value)
}
type stringMatcher struct {
@@ -308,74 +318,36 @@ type stringMatcher struct {
evalFunc func(string, string) bool
}
-func (m *stringMatcher) Match(value any) (bool, error) {
- stringVal, ok := value.(string)
- if !ok {
- return false, NewErrUnexpectedTypeValue[string](value)
+func (m *stringMatcher) Match(value client.NormalValue) (bool, error) {
+ if strVal, ok := value.String(); ok {
+ return m.evalFunc(strVal, m.value), nil
+ }
+ if strOptVal, ok := value.NillableString(); ok {
+ if !strOptVal.HasValue() {
+ return false, nil
+ }
+ return m.evalFunc(strOptVal.Value(), m.value), nil
}
- return m.evalFunc(m.value, stringVal), nil
+ return false, NewErrUnexpectedTypeValue[string](value)
}
-type nilMatcher struct{}
+type nilMatcher struct {
+ matchNil bool
+}
-func (m *nilMatcher) Match(value any) (bool, error) {
- return value == nil, nil
+func (m *nilMatcher) Match(value client.NormalValue) (bool, error) {
+ return value.IsNil() == m.matchNil, nil
}
// checks if the index value is or is not in the given array
type indexInArrayMatcher struct {
- inValues []any
+ inValues []client.NormalValue
isIn bool
}
-func newNinIndexCmp(values []any, kind client.FieldKind, isIn bool) (*indexInArrayMatcher, error) {
- normalizeValueFunc := getNormalizeValueFunc(kind)
- for i := range values {
- normalized, err := normalizeValueFunc(values[i])
- if err != nil {
- return nil, err
- }
- values[i] = normalized
- }
- return &indexInArrayMatcher{inValues: values, isIn: isIn}, nil
-}
-
-func getNormalizeValueFunc(kind client.FieldKind) func(any) (any, error) {
- switch kind {
- case client.FieldKind_NILLABLE_INT:
- return func(value any) (any, error) {
- if v, ok := value.(int64); ok {
- return v, nil
- }
- if v, ok := value.(int32); ok {
- return int64(v), nil
- }
- return nil, ErrInvalidInOperatorValue
- }
- case client.FieldKind_NILLABLE_FLOAT:
- return func(value any) (any, error) {
- if v, ok := value.(float64); ok {
- return v, nil
- }
- if v, ok := value.(float32); ok {
- return float64(v), nil
- }
- return nil, ErrInvalidInOperatorValue
- }
- case client.FieldKind_NILLABLE_STRING:
- return func(value any) (any, error) {
- if v, ok := value.(string); ok {
- return v, nil
- }
- return nil, ErrInvalidInOperatorValue
- }
- }
- return nil
-}
-
-func (m *indexInArrayMatcher) Match(value any) (bool, error) {
+func (m *indexInArrayMatcher) Match(value client.NormalValue) (bool, error) {
for _, inVal := range m.inValues {
- if inVal == value {
+ if inVal.Unwrap() == value.Unwrap() {
return m.isIn, nil
}
}
@@ -419,17 +391,23 @@ func newLikeIndexCmp(filterValue string, isLike bool, isCaseInsensitive bool) (*
return matcher, nil
}
-func (m *indexLikeMatcher) Match(value any) (bool, error) {
- currentVal, ok := value.(string)
+func (m *indexLikeMatcher) Match(value client.NormalValue) (bool, error) {
+ strVal, ok := value.String()
if !ok {
- return false, NewErrUnexpectedTypeValue[string](currentVal)
+ strOptVal, ok := value.NillableString()
+ if !ok {
+ return false, NewErrUnexpectedTypeValue[string](value)
+ }
+ if !strOptVal.HasValue() {
+ return false, nil
+ }
+ strVal = strOptVal.Value()
}
-
if m.isCaseInsensitive {
- currentVal = strings.ToLower(currentVal)
+ strVal = strings.ToLower(strVal)
}
- return m.doesMatch(currentVal) == m.isLike, nil
+ return m.doesMatch(strVal) == m.isLike, nil
}
func (m *indexLikeMatcher) doesMatch(currentVal string) bool {
@@ -451,7 +429,7 @@ func (m *indexLikeMatcher) doesMatch(currentVal string) bool {
type anyMatcher struct{}
-func (m *anyMatcher) Match(any) (bool, error) { return true, nil }
+func (m *anyMatcher) Match(client.NormalValue) (bool, error) { return true, nil }
// newPrefixIndexIterator creates a new eqPrefixIndexIterator for fetching indexed data.
// It can modify the input matchers slice.
@@ -459,7 +437,7 @@ func (f *IndexFetcher) newPrefixIndexIterator(
fieldConditions []fieldFilterCond,
matchers []valueMatcher,
) (*eqPrefixIndexIterator, error) {
- keyFieldValues := make([]any, 0, len(fieldConditions))
+ keyFieldValues := make([]client.NormalValue, 0, len(fieldConditions))
for i := range fieldConditions {
if fieldConditions[i].op != opEq {
// prefix can be created only for subsequent _eq conditions
@@ -496,14 +474,12 @@ func (f *IndexFetcher) newInIndexIterator(
fieldConditions []fieldFilterCond,
matchers []valueMatcher,
) (*inIndexIterator, error) {
- inArr, ok := fieldConditions[0].val.([]any)
- if !ok {
+ if !fieldConditions[0].val.IsArray() {
return nil, ErrInvalidInOperatorValue
}
- inValues := make([]any, 0, len(inArr))
- for _, v := range inArr {
- fieldVal := client.NewFieldValue(client.NONE_CRDT, v)
- inValues = append(inValues, fieldVal.Value())
+ inValues, err := client.ToArrayOfNormalValues(fieldConditions[0].val)
+ if err != nil {
+ return nil, err
}
// iterators for _in filter already iterate over keys with first field value
@@ -514,7 +490,7 @@ func (f *IndexFetcher) newInIndexIterator(
var iter indexIterator
if isUniqueFetchByFullKey(&f.indexDesc, fieldConditions) {
- keyFieldValues := make([]any, len(fieldConditions))
+ keyFieldValues := make([]client.NormalValue, len(fieldConditions))
for i := range fieldConditions {
keyFieldValues[i] = fieldConditions[i].val
}
@@ -547,7 +523,7 @@ func (f *IndexFetcher) newIndexDataStoreKey() core.IndexDataStoreKey {
return key
}
-func (f *IndexFetcher) newIndexDataStoreKeyWithValues(values []any) core.IndexDataStoreKey {
+func (f *IndexFetcher) newIndexDataStoreKeyWithValues(values []client.NormalValue) core.IndexDataStoreKey {
fields := make([]core.IndexedField, len(values))
for i := range values {
fields[i].Value = values[i]
@@ -557,7 +533,10 @@ func (f *IndexFetcher) newIndexDataStoreKeyWithValues(values []any) core.IndexDa
}
func (f *IndexFetcher) createIndexIterator() (indexIterator, error) {
- fieldConditions := f.determineFieldFilterConditions()
+ fieldConditions, err := f.determineFieldFilterConditions()
+ if err != nil {
+ return nil, err
+ }
matchers, err := createValueMatchers(fieldConditions)
if err != nil {
@@ -567,7 +546,7 @@ func (f *IndexFetcher) createIndexIterator() (indexIterator, error) {
switch fieldConditions[0].op {
case opEq:
if isUniqueFetchByFullKey(&f.indexDesc, fieldConditions) {
- keyFieldValues := make([]any, len(fieldConditions))
+ keyFieldValues := make([]client.NormalValue, len(fieldConditions))
for i := range fieldConditions {
keyFieldValues[i] = fieldConditions[i].val
}
@@ -600,49 +579,44 @@ func createValueMatcher(condition *fieldFilterCond) (valueMatcher, error) {
return &anyMatcher{}, nil
}
- if client.IsNillableKind(condition.kind) && condition.val == nil {
- return &nilMatcher{}, nil
+ if condition.val.IsNil() {
+ return &nilMatcher{matchNil: condition.op == opEq}, nil
}
switch condition.op {
case opEq, opGt, opGe, opLt, opLe, opNe:
- switch condition.kind {
- case client.FieldKind_NILLABLE_INT:
- var intVal int64
- switch v := condition.val.(type) {
- case int64:
- intVal = v
- case int32:
- intVal = int64(v)
- case int:
- intVal = int64(v)
- default:
- return nil, NewErrUnexpectedTypeValue[int64](condition.val)
- }
- return &intMatcher{value: intVal, evalFunc: getCompareValsFunc[int64](condition.op)}, nil
- case client.FieldKind_NILLABLE_FLOAT:
- floatVal, ok := condition.val.(float64)
- if !ok {
- return nil, NewErrUnexpectedTypeValue[float64](condition.val)
- }
- return &floatMatcher{value: floatVal, evalFunc: getCompareValsFunc[float64](condition.op)}, nil
- case client.FieldKind_DocID, client.FieldKind_NILLABLE_STRING:
- strVal, ok := condition.val.(string)
- if !ok {
- return nil, NewErrUnexpectedTypeValue[string](condition.val)
- }
- return &stringMatcher{value: strVal, evalFunc: getCompareValsFunc[string](condition.op)}, nil
+ if v, ok := condition.val.Int(); ok {
+ return &intMatcher{value: v, evalFunc: getCompareValsFunc[int64](condition.op)}, nil
+ }
+ if v, ok := condition.val.NillableInt(); ok {
+ return &intMatcher{value: v.Value(), evalFunc: getCompareValsFunc[int64](condition.op)}, nil
+ }
+ if v, ok := condition.val.Float(); ok {
+ return &floatMatcher{value: v, evalFunc: getCompareValsFunc[float64](condition.op)}, nil
+ }
+ if v, ok := condition.val.NillableFloat(); ok {
+ return &floatMatcher{value: v.Value(), evalFunc: getCompareValsFunc[float64](condition.op)}, nil
+ }
+ if v, ok := condition.val.String(); ok {
+ return &stringMatcher{value: v, evalFunc: getCompareValsFunc[string](condition.op)}, nil
+ }
+ if v, ok := condition.val.NillableString(); ok {
+ return &stringMatcher{value: v.Value(), evalFunc: getCompareValsFunc[string](condition.op)}, nil
}
case opIn, opNin:
- inArr, ok := condition.val.([]any)
- if !ok {
- return nil, ErrInvalidInOperatorValue
+ inVals, err := client.ToArrayOfNormalValues(condition.val)
+ if err != nil {
+ return nil, err
}
- return newNinIndexCmp(inArr, condition.kind, condition.op == opIn)
+ return &indexInArrayMatcher{inValues: inVals, isIn: condition.op == opIn}, nil
case opLike, opNlike, opILike, opNILike:
- strVal, ok := condition.val.(string)
+ strVal, ok := condition.val.String()
if !ok {
- return nil, NewErrUnexpectedTypeValue[string](condition.val)
+ strOptVal, ok := condition.val.NillableString()
+ if !ok {
+ return nil, NewErrUnexpectedTypeValue[string](condition.val)
+ }
+ strVal = strOptVal.Value()
}
isLike := condition.op == opLike || condition.op == opILike
isCaseInsensitive := condition.op == opILike || condition.op == opNILike
@@ -668,14 +642,14 @@ func createValueMatchers(conditions []fieldFilterCond) ([]valueMatcher, error) {
type fieldFilterCond struct {
op string
- val any
+ val client.NormalValue
kind client.FieldKind
}
// determineFieldFilterConditions determines the conditions and their corresponding operation
// for each indexed field.
// It returns a slice of fieldFilterCond, where each element corresponds to a field in the index.
-func (f *IndexFetcher) determineFieldFilterConditions() []fieldFilterCond {
+func (f *IndexFetcher) determineFieldFilterConditions() ([]fieldFilterCond, error) {
result := make([]fieldFilterCond, 0, len(f.indexedFields))
for i := range f.indexedFields {
fieldInd := f.mapping.FirstIndexOfName(f.indexedFields[i].Name)
@@ -692,9 +666,19 @@ func (f *IndexFetcher) determineFieldFilterConditions() []fieldFilterCond {
condMap := indexFilterCond.(map[connor.FilterKey]any)
for key, filterVal := range condMap {
opKey := key.(*mapper.Operator)
+ var normalVal client.NormalValue
+ var err error
+ if filterVal == nil {
+ normalVal, err = client.NewNormalNil(f.indexedFields[i].Kind)
+ } else {
+ normalVal, err = client.NewNormalValue(filterVal)
+ }
+ if err != nil {
+ return nil, err
+ }
result = append(result, fieldFilterCond{
op: opKey.Operation,
- val: filterVal,
+ val: normalVal,
kind: f.indexedFields[i].Kind,
})
break
@@ -702,10 +686,14 @@ func (f *IndexFetcher) determineFieldFilterConditions() []fieldFilterCond {
break
}
if !found {
- result = append(result, fieldFilterCond{op: opAny})
+ result = append(result, fieldFilterCond{
+ op: opAny,
+ val: client.NormalVoid{},
+ kind: f.indexedFields[i].Kind,
+ })
}
}
- return result
+ return result, nil
}
// isUniqueFetchByFullKey checks if the only index key can be fetched by the full index key.
@@ -719,11 +707,11 @@ func isUniqueFetchByFullKey(indexDesc *client.IndexDescription, conditions []fie
res := indexDesc.Unique && len(conditions) == len(indexDesc.Fields)
// first condition is not required to be _eq, but if it is, val must not be nil
- res = res && (conditions[0].op != opEq || conditions[0].val != nil)
+ res = res && (conditions[0].op != opEq || !conditions[0].val.IsNil())
// for the rest it must be _eq and val must not be nil
for i := 1; i < len(conditions); i++ {
- res = res && (conditions[i].op == opEq && conditions[i].val != nil)
+ res = res && (conditions[i].op == opEq && !conditions[i].val.IsNil())
}
return res
}
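
After the move from `any` to `client.NormalValue`, the matchers above all follow the same shape: try the plain scalar accessor first, fall back to the nillable variant, treat an absent (nil) value as a non-match rather than an error, and only error on a genuinely unexpected type. A minimal sketch of that pattern with toy stand-ins (the real nillable accessors return `immutable.Option` values):

```go
package main

import "fmt"

// toyValue stands in for client.NormalValue: it may hold an int64, and the
// int may itself be nil (for nillable fields).
type toyValue struct {
	val   int64
	isInt bool
	isNil bool
}

func (v toyValue) Int() (int64, bool) { return v.val, v.isInt && !v.isNil }

// NillableInt reports (value, hasValue, isNillableInt).
func (v toyValue) NillableInt() (int64, bool, bool) { return v.val, !v.isNil, v.isInt }

type intMatcher struct {
	value    int64
	evalFunc func(int64, int64) bool
}

func (m *intMatcher) Match(v toyValue) (bool, error) {
	if iv, ok := v.Int(); ok {
		return m.evalFunc(iv, m.value), nil
	}
	if iv, hasValue, ok := v.NillableInt(); ok {
		if !hasValue {
			return false, nil // nil never satisfies a comparison
		}
		return m.evalFunc(iv, m.value), nil
	}
	return false, fmt.Errorf("unexpected type value")
}

func main() {
	gt := &intMatcher{value: 10, evalFunc: func(a, b int64) bool { return a > b }}
	ok, _ := gt.Match(toyValue{val: 42, isInt: true})
	fmt.Println(ok) // true: 42 > 10
	ok, _ = gt.Match(toyValue{isInt: true, isNil: true})
	fmt.Println(ok) // false: nil matches nothing
}
```
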
diff --git a/db/fetcher/mocks/fetcher.go b/db/fetcher/mocks/fetcher.go
index 044425c70b..4f537aefea 100644
--- a/db/fetcher/mocks/fetcher.go
+++ b/db/fetcher/mocks/fetcher.go
@@ -3,16 +3,21 @@
package mocks
import (
- context "context"
-
+ acp "github.com/sourcenetwork/defradb/acp"
client "github.com/sourcenetwork/defradb/client"
+ context "context"
+
core "github.com/sourcenetwork/defradb/core"
datastore "github.com/sourcenetwork/defradb/datastore"
fetcher "github.com/sourcenetwork/defradb/db/fetcher"
+ identity "github.com/sourcenetwork/defradb/acp/identity"
+
+ immutable "github.com/sourcenetwork/immutable"
+
mapper "github.com/sourcenetwork/defradb/planner/mapper"
mock "github.com/stretchr/testify/mock"
@@ -133,13 +138,13 @@ func (_c *Fetcher_FetchNext_Call) RunAndReturn(run func(context.Context) (fetche
return _c
}
-// Init provides a mock function with given fields: ctx, txn, col, fields, filter, docmapper, reverse, showDeleted
-func (_m *Fetcher) Init(ctx context.Context, txn datastore.Txn, col client.Collection, fields []client.FieldDefinition, filter *mapper.Filter, docmapper *core.DocumentMapping, reverse bool, showDeleted bool) error {
- ret := _m.Called(ctx, txn, col, fields, filter, docmapper, reverse, showDeleted)
+// Init provides a mock function with given fields: ctx, _a1, txn, _a3, col, fields, filter, docmapper, reverse, showDeleted
+func (_m *Fetcher) Init(ctx context.Context, _a1 immutable.Option[identity.Identity], txn datastore.Txn, _a3 immutable.Option[acp.ACP], col client.Collection, fields []client.FieldDefinition, filter *mapper.Filter, docmapper *core.DocumentMapping, reverse bool, showDeleted bool) error {
+ ret := _m.Called(ctx, _a1, txn, _a3, col, fields, filter, docmapper, reverse, showDeleted)
var r0 error
- if rf, ok := ret.Get(0).(func(context.Context, datastore.Txn, client.Collection, []client.FieldDefinition, *mapper.Filter, *core.DocumentMapping, bool, bool) error); ok {
- r0 = rf(ctx, txn, col, fields, filter, docmapper, reverse, showDeleted)
+ if rf, ok := ret.Get(0).(func(context.Context, immutable.Option[identity.Identity], datastore.Txn, immutable.Option[acp.ACP], client.Collection, []client.FieldDefinition, *mapper.Filter, *core.DocumentMapping, bool, bool) error); ok {
+ r0 = rf(ctx, _a1, txn, _a3, col, fields, filter, docmapper, reverse, showDeleted)
} else {
r0 = ret.Error(0)
}
@@ -154,20 +159,22 @@ type Fetcher_Init_Call struct {
// Init is a helper method to define mock.On call
// - ctx context.Context
+// - _a1 immutable.Option[identity.Identity]
// - txn datastore.Txn
+// - _a3 immutable.Option[acp.ACP]
// - col client.Collection
// - fields []client.FieldDefinition
// - filter *mapper.Filter
// - docmapper *core.DocumentMapping
// - reverse bool
// - showDeleted bool
-func (_e *Fetcher_Expecter) Init(ctx interface{}, txn interface{}, col interface{}, fields interface{}, filter interface{}, docmapper interface{}, reverse interface{}, showDeleted interface{}) *Fetcher_Init_Call {
- return &Fetcher_Init_Call{Call: _e.mock.On("Init", ctx, txn, col, fields, filter, docmapper, reverse, showDeleted)}
+func (_e *Fetcher_Expecter) Init(ctx interface{}, _a1 interface{}, txn interface{}, _a3 interface{}, col interface{}, fields interface{}, filter interface{}, docmapper interface{}, reverse interface{}, showDeleted interface{}) *Fetcher_Init_Call {
+ return &Fetcher_Init_Call{Call: _e.mock.On("Init", ctx, _a1, txn, _a3, col, fields, filter, docmapper, reverse, showDeleted)}
}
-func (_c *Fetcher_Init_Call) Run(run func(ctx context.Context, txn datastore.Txn, col client.Collection, fields []client.FieldDefinition, filter *mapper.Filter, docmapper *core.DocumentMapping, reverse bool, showDeleted bool)) *Fetcher_Init_Call {
+func (_c *Fetcher_Init_Call) Run(run func(ctx context.Context, _a1 immutable.Option[identity.Identity], txn datastore.Txn, _a3 immutable.Option[acp.ACP], col client.Collection, fields []client.FieldDefinition, filter *mapper.Filter, docmapper *core.DocumentMapping, reverse bool, showDeleted bool)) *Fetcher_Init_Call {
_c.Call.Run(func(args mock.Arguments) {
- run(args[0].(context.Context), args[1].(datastore.Txn), args[2].(client.Collection), args[3].([]client.FieldDefinition), args[4].(*mapper.Filter), args[5].(*core.DocumentMapping), args[6].(bool), args[7].(bool))
+ run(args[0].(context.Context), args[1].(immutable.Option[identity.Identity]), args[2].(datastore.Txn), args[3].(immutable.Option[acp.ACP]), args[4].(client.Collection), args[5].([]client.FieldDefinition), args[6].(*mapper.Filter), args[7].(*core.DocumentMapping), args[8].(bool), args[9].(bool))
})
return _c
}
@@ -177,7 +184,7 @@ func (_c *Fetcher_Init_Call) Return(_a0 error) *Fetcher_Init_Call {
return _c
}
-func (_c *Fetcher_Init_Call) RunAndReturn(run func(context.Context, datastore.Txn, client.Collection, []client.FieldDefinition, *mapper.Filter, *core.DocumentMapping, bool, bool) error) *Fetcher_Init_Call {
+func (_c *Fetcher_Init_Call) RunAndReturn(run func(context.Context, immutable.Option[identity.Identity], datastore.Txn, immutable.Option[acp.ACP], client.Collection, []client.FieldDefinition, *mapper.Filter, *core.DocumentMapping, bool, bool) error) *Fetcher_Init_Call {
_c.Call.Return(run)
return _c
}
diff --git a/db/fetcher/mocks/utils.go b/db/fetcher/mocks/utils.go
index 298d5b2ad6..524c46fc9e 100644
--- a/db/fetcher/mocks/utils.go
+++ b/db/fetcher/mocks/utils.go
@@ -27,6 +27,8 @@ func NewStubbedFetcher(t *testing.T) *Fetcher {
mock.Anything,
mock.Anything,
mock.Anything,
+ mock.Anything,
+ mock.Anything,
).Maybe().Return(nil)
f.EXPECT().Start(mock.Anything, mock.Anything).Maybe().Return(nil)
f.EXPECT().FetchNext(mock.Anything).Maybe().Return(nil, nil)
diff --git a/db/fetcher/versioned.go b/db/fetcher/versioned.go
index 3f05f2c29a..096002521c 100644
--- a/db/fetcher/versioned.go
+++ b/db/fetcher/versioned.go
@@ -19,6 +19,10 @@ import (
ds "github.com/ipfs/go-datastore"
format "github.com/ipfs/go-ipld-format"
+ "github.com/sourcenetwork/immutable"
+
+ "github.com/sourcenetwork/defradb/acp"
+ acpIdentity "github.com/sourcenetwork/defradb/acp/identity"
"github.com/sourcenetwork/defradb/client"
"github.com/sourcenetwork/defradb/core"
"github.com/sourcenetwork/defradb/datastore"
@@ -91,6 +95,8 @@ type VersionedFetcher struct {
queuedCids *list.List
+ acp immutable.Option[acp.ACP]
+
col client.Collection
// @todo index *client.IndexDescription
mCRDTs map[uint32]merklecrdt.MerkleCRDT
@@ -99,7 +105,9 @@ type VersionedFetcher struct {
// Init initializes the VersionedFetcher.
func (vf *VersionedFetcher) Init(
ctx context.Context,
+ identity immutable.Option[acpIdentity.Identity],
txn datastore.Txn,
+ acp immutable.Option[acp.ACP],
col client.Collection,
fields []client.FieldDefinition,
filter *mapper.Filter,
@@ -107,6 +115,7 @@ func (vf *VersionedFetcher) Init(
reverse bool,
showDeleted bool,
) error {
+ vf.acp = acp
vf.col = col
vf.queuedCids = list.New()
vf.mCRDTs = make(map[uint32]merklecrdt.MerkleCRDT)
@@ -130,7 +139,18 @@ func (vf *VersionedFetcher) Init(
// run the DF init; the VersionedFetcher only supports the Primary (0) index
vf.DocumentFetcher = new(DocumentFetcher)
- return vf.DocumentFetcher.Init(ctx, vf.store, col, fields, filter, docmapper, reverse, showDeleted)
+ return vf.DocumentFetcher.Init(
+ ctx,
+ identity,
+ vf.store,
+ acp,
+ col,
+ fields,
+ filter,
+ docmapper,
+ reverse,
+ showDeleted,
+ )
}
// Start serializes the correct state according to the Key and CID.
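
`VersionedFetcher.Init` now takes an optional identity and ACP module and simply threads them through to the wrapped `DocumentFetcher`. A minimal sketch of that option-threading pattern using `github.com/sourcenetwork/immutable`, with toy `identity`/`acpModule` types standing in for the real ones:

```go
package main

import (
	"fmt"

	"github.com/sourcenetwork/immutable"
)

// Toy stand-ins for acpIdentity.Identity and acp.ACP.
type identity struct{ did string }
type acpModule struct{ name string }

type innerFetcher struct{}

func (f *innerFetcher) Init(id immutable.Option[identity], acp immutable.Option[acpModule]) {
	if acp.HasValue() {
		fmt.Println("access checks enabled via", acp.Value().name)
	} else {
		fmt.Println("no acp configured: unrestricted access")
	}
	_ = id
}

// outerFetcher mirrors VersionedFetcher: it records the acp option and
// passes both options through unchanged to the fetcher it wraps.
type outerFetcher struct {
	acp   immutable.Option[acpModule]
	inner innerFetcher
}

func (f *outerFetcher) Init(id immutable.Option[identity], acp immutable.Option[acpModule]) {
	f.acp = acp
	f.inner.Init(id, acp)
}

func main() {
	var f outerFetcher
	f.Init(immutable.Some(identity{did: "did:key:example"}), immutable.None[acpModule]())
}
```
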
diff --git a/db/index.go b/db/index.go
index 319cdeb8a7..693a18a5bf 100644
--- a/db/index.go
+++ b/db/index.go
@@ -36,8 +36,12 @@ func canConvertIndexFieldValue[T any](val any) bool {
}
func getValidateIndexFieldFunc(kind client.FieldKind) func(any) bool {
+ if kind.IsObject() && !kind.IsArray() {
+ return canConvertIndexFieldValue[string]
+ }
+
switch kind {
- case client.FieldKind_NILLABLE_STRING, client.FieldKind_FOREIGN_OBJECT:
+ case client.FieldKind_NILLABLE_STRING:
return canConvertIndexFieldValue[string]
case client.FieldKind_NILLABLE_INT:
return canConvertIndexFieldValue[int64]
@@ -112,18 +116,22 @@ type collectionBaseIndex struct {
fieldsDescs []client.SchemaFieldDescription
}
-func (index *collectionBaseIndex) getDocFieldValues(doc *client.Document) ([]*client.FieldValue, error) {
- result := make([]*client.FieldValue, 0, len(index.fieldsDescs))
+func (index *collectionBaseIndex) getDocFieldValues(doc *client.Document) ([]client.NormalValue, error) {
+ result := make([]client.NormalValue, 0, len(index.fieldsDescs))
for iter := range index.fieldsDescs {
fieldVal, err := doc.TryGetValue(index.fieldsDescs[iter].Name)
if err != nil {
return nil, err
}
if fieldVal == nil || fieldVal.Value() == nil {
- result = append(result, client.NewFieldValue(client.NONE_CRDT, nil))
+ normalNil, err := client.NewNormalNil(index.fieldsDescs[iter].Kind)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, normalNil)
continue
}
- result = append(result, fieldVal)
+ result = append(result, fieldVal.NormalValue())
}
return result, nil
}
@@ -138,7 +146,7 @@ func (index *collectionBaseIndex) getDocumentsIndexKey(
fields := make([]core.IndexedField, len(index.fieldsDescs))
for i := range index.fieldsDescs {
- fields[i].Value = fieldValues[i].Value()
+ fields[i].Value = fieldValues[i]
fields[i].Descending = index.desc.Fields[i].Descending
}
return core.NewIndexDataStoreKey(index.collection.ID(), index.desc.ID, fields), nil
@@ -207,7 +215,7 @@ func (index *collectionSimpleIndex) getDocumentsIndexKey(
return core.IndexDataStoreKey{}, err
}
- key.Fields = append(key.Fields, core.IndexedField{Value: doc.ID().String()})
+ key.Fields = append(key.Fields, core.IndexedField{Value: client.NewNormalString(doc.ID().String())})
return key, nil
}
@@ -264,7 +272,7 @@ func (index *collectionSimpleIndex) deleteDocIndex(
// hasIndexKeyNilField returns true if the index key has a field with nil value
func hasIndexKeyNilField(key *core.IndexDataStoreKey) bool {
for i := range key.Fields {
- if key.Fields[i].Value == nil {
+ if key.Fields[i].Value.IsNil() {
return true
}
}
@@ -330,7 +338,7 @@ func (index *collectionUniqueIndex) getDocumentsIndexRecord(
return core.IndexDataStoreKey{}, nil, err
}
if hasIndexKeyNilField(&key) {
- key.Fields = append(key.Fields, core.IndexedField{Value: doc.ID().String()})
+ key.Fields = append(key.Fields, core.IndexedField{Value: client.NewNormalString(doc.ID().String())})
return key, []byte{}, nil
} else {
return key, []byte(doc.ID().String()), nil
@@ -373,6 +381,11 @@ func (index *collectionUniqueIndex) Update(
oldDoc *client.Document,
newDoc *client.Document,
) error {
+ // We only need to update the index if one of the indexed fields
+ // on the document has been changed.
+ if !isUpdatingIndexedFields(index, oldDoc, newDoc) {
+ return nil
+ }
newKey, newVal, err := index.prepareIndexRecordToStore(ctx, txn, newDoc)
if err != nil {
return err
@@ -395,3 +408,25 @@ func (index *collectionUniqueIndex) deleteDocIndex(
}
return index.deleteIndexKey(ctx, txn, key)
}
+
+func isUpdatingIndexedFields(index CollectionIndex, oldDoc, newDoc *client.Document) bool {
+ for _, indexedFields := range index.Description().Fields {
+ oldVal, getOldValErr := oldDoc.GetValue(indexedFields.Name)
+ newVal, getNewValErr := newDoc.GetValue(indexedFields.Name)
+
+	// GetValue returns an error when the field doesn't exist.
+	// For oldDoc this happens only if the field was never set when the
+	// document was first created. For newDoc it happens only if the field
+	// was never set at creation AND was not set by this update.
+ switch {
+ case getOldValErr != nil && getNewValErr != nil:
+ continue
+ case getOldValErr != nil && getNewValErr == nil:
+ return true
+ case oldVal.Value() != newVal.Value():
+ return true
+ }
+ }
+ return false
+}
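
`isUpdatingIndexedFields` lets `collectionUniqueIndex.Update` skip the delete-and-rewrite entirely when no indexed field changed. A minimal sketch of the per-field decision rule (toy signature; the real code reads values off `client.Document`):

```go
package main

import "fmt"

// needsReindex mirrors the switch above for a single indexed field:
// reindex only when the field was newly set by this update, or when its
// value actually changed.
func needsReindex(oldVal, newVal any, oldErr, newErr error) bool {
	switch {
	case oldErr != nil && newErr != nil:
		return false // field set on neither document: nothing to do
	case oldErr != nil && newErr == nil:
		return true // field newly set by this update
	default:
		return oldVal != newVal
	}
}

func main() {
	fmt.Println(needsReindex("Alice", "Alice", nil, nil)) // false: unchanged
	fmt.Println(needsReindex("Alice", "Bob", nil, nil))   // true: changed
}
```
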
diff --git a/db/index_test.go b/db/index_test.go
index 44c2e45f52..5409b6c20e 100644
--- a/db/index_test.go
+++ b/db/index_test.go
@@ -53,7 +53,7 @@ const (
type indexTestFixture struct {
ctx context.Context
- db *implicitTxnDB
+ db *db
txn datastore.Txn
users client.Collection
t *testing.T
@@ -219,7 +219,8 @@ func (f *indexTestFixture) createUserCollectionIndexOnAge() client.IndexDescript
}
func (f *indexTestFixture) dropIndex(colName, indexName string) error {
- return f.db.dropCollectionIndex(f.ctx, f.txn, colName, indexName)
+ ctx := SetContextTxn(f.ctx, f.txn)
+ return f.db.dropCollectionIndex(ctx, colName, indexName)
}
func (f *indexTestFixture) countIndexPrefixes(indexName string) int {
@@ -255,7 +256,8 @@ func (f *indexTestFixture) createCollectionIndexFor(
collectionName string,
desc client.IndexDescription,
) (client.IndexDescription, error) {
- index, err := f.db.createCollectionIndex(f.ctx, f.txn, collectionName, desc)
+ ctx := SetContextTxn(f.ctx, f.txn)
+ index, err := f.db.createCollectionIndex(ctx, collectionName, desc)
if err == nil {
f.commitTxn()
}
@@ -263,11 +265,13 @@ func (f *indexTestFixture) createCollectionIndexFor(
}
func (f *indexTestFixture) getAllIndexes() (map[client.CollectionName][]client.IndexDescription, error) {
- return f.db.getAllIndexDescriptions(f.ctx, f.txn)
+ ctx := SetContextTxn(f.ctx, f.txn)
+ return f.db.getAllIndexDescriptions(ctx)
}
func (f *indexTestFixture) getCollectionIndexes(colID uint32) ([]client.IndexDescription, error) {
- return f.db.fetchCollectionIndexDescriptions(f.ctx, f.txn, colID)
+ ctx := SetContextTxn(f.ctx, f.txn)
+ return f.db.fetchCollectionIndexDescriptions(ctx, colID)
}
func TestCreateIndex_IfFieldsIsEmpty_ReturnError(t *testing.T) {
@@ -784,7 +788,8 @@ func TestCollectionGetIndexes_ShouldCloseQueryIterator(t *testing.T) {
mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).
Return(queryResults, nil)
- _, err := f.users.WithTxn(mockedTxn).GetIndexes(f.ctx)
+ ctx := SetContextTxn(f.ctx, mockedTxn)
+ _, err := f.users.GetIndexes(ctx)
assert.NoError(t, err)
}
@@ -840,7 +845,8 @@ func TestCollectionGetIndexes_IfSystemStoreFails_ReturnError(t *testing.T) {
mockedTxn.EXPECT().Systemstore().Unset()
mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore).Maybe()
- _, err := f.users.WithTxn(mockedTxn).GetIndexes(f.ctx)
+ ctx := SetContextTxn(f.ctx, mockedTxn)
+ _, err := f.users.GetIndexes(ctx)
require.ErrorIs(t, err, testCase.ExpectedError)
}
}
@@ -902,7 +908,8 @@ func TestCollectionGetIndexes_IfStoredIndexWithUnsupportedType_ReturnError(t *te
mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).
Return(mocks.NewQueryResultsWithValues(t, indexDescData), nil)
- _, err = collection.WithTxn(mockedTxn).GetIndexes(f.ctx)
+ ctx := SetContextTxn(f.ctx, mockedTxn)
+ _, err = collection.GetIndexes(ctx)
require.ErrorIs(t, err, NewErrUnsupportedIndexFieldType(unsupportedKind))
}
@@ -1093,17 +1100,18 @@ func TestDropIndex_IfFailsToDeleteFromStorage_ReturnError(t *testing.T) {
mockedTxn.MockDatastore.EXPECT().Query(mock.Anything, mock.Anything).Maybe().
Return(mocks.NewQueryResultsWithValues(t), nil)
- err := f.users.WithTxn(mockedTxn).DropIndex(f.ctx, testUsersColIndexName)
+ ctx := SetContextTxn(f.ctx, mockedTxn)
+ err := f.users.DropIndex(ctx, testUsersColIndexName)
require.ErrorIs(t, err, testErr)
}
func TestDropIndex_ShouldUpdateCollectionsDescription(t *testing.T) {
f := newIndexTestFixture(t)
defer f.db.Close()
- col := f.users.WithTxn(f.txn)
- _, err := col.CreateIndex(f.ctx, getUsersIndexDescOnName())
+ ctx := SetContextTxn(f.ctx, f.txn)
+ _, err := f.users.CreateIndex(ctx, getUsersIndexDescOnName())
require.NoError(t, err)
- indOnAge, err := col.CreateIndex(f.ctx, getUsersIndexDescOnAge())
+ indOnAge, err := f.users.CreateIndex(ctx, getUsersIndexDescOnAge())
require.NoError(t, err)
f.commitTxn()
@@ -1144,7 +1152,8 @@ func TestDropIndex_IfSystemStoreFails_ReturnError(t *testing.T) {
mockedTxn.EXPECT().Systemstore().Unset()
mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore).Maybe()
- err := f.users.WithTxn(mockedTxn).DropIndex(f.ctx, testUsersColIndexName)
+ ctx := SetContextTxn(f.ctx, mockedTxn)
+ err := f.users.DropIndex(ctx, testUsersColIndexName)
require.ErrorIs(t, err, testErr)
}
@@ -1167,7 +1176,8 @@ func TestDropAllIndexes_ShouldDeleteAllIndexes(t *testing.T) {
assert.Equal(t, 2, f.countIndexPrefixes(""))
- err = f.users.(*collection).dropAllIndexes(f.ctx, f.txn)
+ ctx := SetContextTxn(f.ctx, f.txn)
+ err = f.users.(*collection).dropAllIndexes(ctx)
assert.NoError(t, err)
assert.Equal(t, 0, f.countIndexPrefixes(""))
@@ -1179,7 +1189,8 @@ func TestDropAllIndexes_IfStorageFails_ReturnError(t *testing.T) {
f.createUserCollectionIndexOnName()
f.db.Close()
- err := f.users.(*collection).dropAllIndexes(f.ctx, f.txn)
+ ctx := SetContextTxn(f.ctx, f.txn)
+ err := f.users.(*collection).dropAllIndexes(ctx)
assert.Error(t, err)
}
@@ -1235,7 +1246,8 @@ func TestDropAllIndexes_IfSystemStorageFails_ReturnError(t *testing.T) {
mockedTxn.EXPECT().Systemstore().Unset()
mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore).Maybe()
- err := f.users.(*collection).dropAllIndexes(f.ctx, f.txn)
+ ctx := SetContextTxn(f.ctx, f.txn)
+ err := f.users.(*collection).dropAllIndexes(ctx)
assert.ErrorIs(t, err, testErr, testCase.Name)
}
}
@@ -1256,7 +1268,8 @@ func TestDropAllIndexes_ShouldCloseQueryIterator(t *testing.T) {
mockedTxn.EXPECT().Systemstore().Unset()
mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore).Maybe()
- _ = f.users.(*collection).dropAllIndexes(f.ctx, f.txn)
+ ctx := SetContextTxn(f.ctx, f.txn)
+ _ = f.users.(*collection).dropAllIndexes(ctx)
}
func TestNewCollectionIndex_IfDescriptionHasNoFields_ReturnError(t *testing.T) {
diff --git a/db/indexed_docs_test.go b/db/indexed_docs_test.go
index d10ad8eb5b..c3c1c6de7b 100644
--- a/db/indexed_docs_test.go
+++ b/db/indexed_docs_test.go
@@ -23,6 +23,8 @@ import (
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
+ "github.com/sourcenetwork/defradb/acp"
+ acpIdentity "github.com/sourcenetwork/defradb/acp/identity"
"github.com/sourcenetwork/defradb/client"
"github.com/sourcenetwork/defradb/core"
"github.com/sourcenetwork/defradb/datastore"
@@ -58,7 +60,7 @@ func (f *indexTestFixture) newUserDoc(name string, age int, col client.Collectio
data, err := json.Marshal(d)
require.NoError(f.t, err)
- doc, err := client.NewDocFromJSON(data, col.Schema())
+ doc, err := client.NewDocFromJSON(data, col.Definition())
require.NoError(f.t, err)
return doc
}
@@ -68,7 +70,7 @@ func (f *indexTestFixture) newProdDoc(id int, price float64, cat string, col cli
data, err := json.Marshal(d)
require.NoError(f.t, err)
- doc, err := client.NewDocFromJSON(data, col.Schema())
+ doc, err := client.NewDocFromJSON(data, col.Definition())
require.NoError(f.t, err)
return doc
}
@@ -129,7 +131,8 @@ func (b *indexKeyBuilder) Build() core.IndexDataStoreKey {
return key
}
- cols, err := b.f.db.getCollections(b.f.ctx, b.f.txn, client.CollectionFetchOptions{})
+ ctx := SetContextTxn(b.f.ctx, b.f.txn)
+ cols, err := b.f.db.getCollections(ctx, client.CollectionFetchOptions{})
require.NoError(b.f.t, err)
var collection client.Collection
for _, col := range cols {
@@ -166,15 +169,25 @@ indexLoop:
hasNilValue := false
for i, fieldName := range b.fieldsNames {
fieldValue, err := b.doc.GetValue(fieldName)
- var val any
+ var val client.NormalValue
if err != nil {
if !errors.Is(err, client.ErrFieldNotExist) {
require.NoError(b.f.t, err)
}
- } else if fieldValue != nil {
- val = fieldValue.Value()
}
- if val == nil {
+ if fieldValue != nil {
+ val = fieldValue.NormalValue()
+ } else {
+ kind := client.FieldKind_NILLABLE_STRING
+ if fieldName == usersAgeFieldName {
+ kind = client.FieldKind_NILLABLE_INT
+ } else if fieldName == usersWeightFieldName {
+ kind = client.FieldKind_NILLABLE_FLOAT
+ }
+ val, err = client.NewNormalNil(kind)
+ require.NoError(b.f.t, err)
+ }
+ if val.IsNil() {
hasNilValue = true
}
descending := false
@@ -185,7 +198,7 @@ indexLoop:
}
if !b.isUnique || hasNilValue {
- key.Fields = append(key.Fields, core.IndexedField{Value: b.doc.ID().String()})
+ key.Fields = append(key.Fields, core.IndexedField{Value: client.NewNormalString(b.doc.ID().String())})
}
}
@@ -310,7 +323,8 @@ func TestNonUnique_IfFailsToStoredIndexedDoc_Error(t *testing.T) {
dataStoreOn.Put(mock.Anything, key.ToDS(), mock.Anything).Return(errors.New("error"))
dataStoreOn.Put(mock.Anything, mock.Anything, mock.Anything).Return(nil)
- err := f.users.WithTxn(mockTxn).Create(f.ctx, doc)
+ ctx := SetContextTxn(f.ctx, mockTxn)
+ err := f.users.Create(ctx, doc)
require.ErrorIs(f.t, err, NewErrFailedToStoreIndexedField("name", nil))
}
@@ -325,7 +339,7 @@ func TestNonUnique_IfDocDoesNotHaveIndexedField_SkipIndex(t *testing.T) {
}{Age: 21, Weight: 154.1})
require.NoError(f.t, err)
- doc, err := client.NewDocFromJSON(data, f.users.Schema())
+ doc, err := client.NewDocFromJSON(data, f.users.Definition())
require.NoError(f.t, err)
err = f.users.Create(f.ctx, doc)
@@ -348,7 +362,8 @@ func TestNonUnique_IfSystemStorageHasInvalidIndexDescription_Error(t *testing.T)
systemStoreOn.Query(mock.Anything, mock.Anything).
Return(mocks.NewQueryResultsWithValues(t, []byte("invalid")), nil)
- err := f.users.WithTxn(mockTxn).Create(f.ctx, doc)
+ ctx := SetContextTxn(f.ctx, mockTxn)
+ err := f.users.Create(ctx, doc)
assert.ErrorIs(t, err, datastore.NewErrInvalidStoredValue(nil))
}
@@ -366,7 +381,8 @@ func TestNonUnique_IfSystemStorageFailsToReadIndexDesc_Error(t *testing.T) {
systemStoreOn.Query(mock.Anything, mock.Anything).
Return(nil, testErr)
- err := f.users.WithTxn(mockTxn).Create(f.ctx, doc)
+ ctx := SetContextTxn(f.ctx, mockTxn)
+ err := f.users.Create(ctx, doc)
require.ErrorIs(t, err, testErr)
}
@@ -533,7 +549,7 @@ func TestNonUnique_IfIndexedFieldIsNil_StoreItAsNil(t *testing.T) {
}{Age: 44})
require.NoError(f.t, err)
- doc, err := client.NewDocFromJSON(docJSON, f.users.Schema())
+ doc, err := client.NewDocFromJSON(docJSON, f.users.Definition())
require.NoError(f.t, err)
f.saveDocToCollection(doc, f.users)
@@ -578,8 +594,30 @@ func TestNonUniqueCreate_IfUponIndexingExistingDocsFetcherFails_ReturnError(t *t
Name: "Fails to init",
PrepareFetcher: func() fetcher.Fetcher {
f := fetcherMocks.NewStubbedFetcher(t)
- f.EXPECT().Init(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Unset()
- f.EXPECT().Init(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(testError)
+ f.EXPECT().Init(
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ ).Unset()
+ f.EXPECT().Init(
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ ).Return(testError)
f.EXPECT().Close().Unset()
f.EXPECT().Close().Return(nil)
return f
@@ -756,7 +794,8 @@ func TestNonUniqueUpdate_IfFailsToReadIndexDescription_ReturnError(t *testing.T)
require.NoError(t, err)
// retrieve the collection without index cached
- usersCol, err := f.db.getCollectionByName(f.ctx, f.txn, usersColName)
+ ctx := SetContextTxn(f.ctx, f.txn)
+ usersCol, err := f.db.getCollectionByName(ctx, usersColName)
require.NoError(t, err)
testErr := errors.New("test error")
@@ -772,7 +811,8 @@ func TestNonUniqueUpdate_IfFailsToReadIndexDescription_ReturnError(t *testing.T)
usersCol.(*collection).fetcherFactory = func() fetcher.Fetcher {
return fetcherMocks.NewStubbedFetcher(t)
}
- err = usersCol.WithTxn(mockedTxn).Update(f.ctx, doc)
+ ctx = SetContextTxn(f.ctx, mockedTxn)
+ err = usersCol.Update(ctx, doc)
require.ErrorIs(t, err, testErr)
}
@@ -787,8 +827,30 @@ func TestNonUniqueUpdate_IfFetcherFails_ReturnError(t *testing.T) {
Name: "Fails to init",
PrepareFetcher: func() fetcher.Fetcher {
f := fetcherMocks.NewStubbedFetcher(t)
- f.EXPECT().Init(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Unset()
- f.EXPECT().Init(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(testError)
+ f.EXPECT().Init(
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ ).Unset()
+ f.EXPECT().Init(
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ ).Return(testError)
f.EXPECT().Close().Unset()
f.EXPECT().Close().Return(nil)
return f
@@ -886,11 +948,35 @@ func TestNonUniqueUpdate_ShouldPassToFetcherOnlyRelevantFields(t *testing.T) {
f.users.(*collection).fetcherFactory = func() fetcher.Fetcher {
f := fetcherMocks.NewStubbedFetcher(t)
- f.EXPECT().Init(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Unset()
- f.EXPECT().Init(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).
+ f.EXPECT().Init(
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ ).Unset()
+ f.EXPECT().Init(
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ mock.Anything,
+ ).
RunAndReturn(func(
ctx context.Context,
+ identity immutable.Option[acpIdentity.Identity],
txn datastore.Txn,
+ acp immutable.Option[acp.ACP],
col client.Collection,
fields []client.FieldDefinition,
filter *mapper.Filter,
@@ -968,7 +1054,8 @@ func TestNonUniqueUpdate_IfDatastoreFails_ReturnError(t *testing.T) {
mockedTxn.EXPECT().Datastore().Unset()
mockedTxn.EXPECT().Datastore().Return(mockedTxn.MockDatastore).Maybe()
- err = f.users.WithTxn(mockedTxn).Update(f.ctx, doc)
+ ctx := SetContextTxn(f.ctx, mockedTxn)
+ err = f.users.Update(ctx, doc)
require.ErrorIs(t, err, testErr)
}
}
@@ -983,7 +1070,7 @@ func TestNonUpdate_IfIndexedFieldWasNil_ShouldDeleteIt(t *testing.T) {
}{Age: 44})
require.NoError(f.t, err)
- doc, err := client.NewDocFromJSON(docJSON, f.users.Schema())
+ doc, err := client.NewDocFromJSON(docJSON, f.users.Definition())
require.NoError(f.t, err)
f.saveDocToCollection(doc, f.users)
@@ -1069,7 +1156,7 @@ func TestUnique_IfIndexedFieldIsNil_StoreItAsNil(t *testing.T) {
}{Age: 44})
require.NoError(f.t, err)
- doc, err := client.NewDocFromJSON(docJSON, f.users.Schema())
+ doc, err := client.NewDocFromJSON(docJSON, f.users.Definition())
require.NoError(f.t, err)
f.saveDocToCollection(doc, f.users)
@@ -1183,7 +1270,7 @@ func TestComposite_IfIndexedFieldIsNil_StoreItAsNil(t *testing.T) {
}{Age: 44})
require.NoError(f.t, err)
- doc, err := client.NewDocFromJSON(docJSON, f.users.Schema())
+ doc, err := client.NewDocFromJSON(docJSON, f.users.Definition())
require.NoError(f.t, err)
f.saveDocToCollection(doc, f.users)
@@ -1196,7 +1283,7 @@ func TestComposite_IfIndexedFieldIsNil_StoreItAsNil(t *testing.T) {
assert.Len(t, data, 0)
}
-func TestComposite_IfNilUpdateToValue_ShouldUpdateIndexStored(t *testing.T) {
+func TestUniqueComposite_IfNilUpdateToValue_ShouldUpdateIndexStored(t *testing.T) {
testCases := []struct {
Name string
Doc string
@@ -1238,34 +1325,36 @@ func TestComposite_IfNilUpdateToValue_ShouldUpdateIndexStored(t *testing.T) {
}
for _, tc := range testCases {
- f := newIndexTestFixture(t)
- defer f.db.Close()
+ t.Run(tc.Name, func(t *testing.T) {
+ f := newIndexTestFixture(t)
+ defer f.db.Close()
- indexDesc := makeUnique(addFieldToIndex(getUsersIndexDescOnName(), usersAgeFieldName))
- _, err := f.createCollectionIndexFor(f.users.Name().Value(), indexDesc)
- require.NoError(f.t, err)
- f.commitTxn()
+ indexDesc := makeUnique(addFieldToIndex(getUsersIndexDescOnName(), usersAgeFieldName))
+ _, err := f.createCollectionIndexFor(f.users.Name().Value(), indexDesc)
+ require.NoError(f.t, err)
+ f.commitTxn()
- doc, err := client.NewDocFromJSON([]byte(tc.Doc), f.users.Schema())
- require.NoError(f.t, err)
+ doc, err := client.NewDocFromJSON([]byte(tc.Doc), f.users.Definition())
+ require.NoError(f.t, err)
- f.saveDocToCollection(doc, f.users)
+ f.saveDocToCollection(doc, f.users)
- oldKey := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName, usersAgeFieldName).
- Doc(doc).Unique().Build()
+ oldKey := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName, usersAgeFieldName).
+ Doc(doc).Unique().Build()
- require.NoError(t, doc.SetWithJSON([]byte(tc.Update)))
+ require.NoError(t, doc.SetWithJSON([]byte(tc.Update)))
- newKey := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName, usersAgeFieldName).
- Doc(doc).Unique().Build()
+ newKey := newIndexKeyBuilder(f).Col(usersColName).Fields(usersNameFieldName, usersAgeFieldName).
+ Doc(doc).Unique().Build()
- require.NoError(t, f.users.Update(f.ctx, doc), tc.Name)
- f.commitTxn()
+ require.NoError(t, f.users.Update(f.ctx, doc), tc.Name)
+ f.commitTxn()
- _, err = f.txn.Datastore().Get(f.ctx, oldKey.ToDS())
- require.Error(t, err, oldKey.ToString(), oldKey.ToDS(), tc.Name)
- _, err = f.txn.Datastore().Get(f.ctx, newKey.ToDS())
- require.NoError(t, err, newKey.ToString(), newKey.ToDS(), tc.Name)
+ _, err = f.txn.Datastore().Get(f.ctx, oldKey.ToDS())
+ require.Error(t, err, oldKey.ToString(), oldKey.ToDS(), tc.Name)
+ _, err = f.txn.Datastore().Get(f.ctx, newKey.ToDS())
+ require.NoError(t, err, newKey.ToString(), newKey.ToDS(), tc.Name)
+ })
}
}
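
Most of the test churn above is mechanical: the removed `col.WithTxn(txn)` wrapper is replaced by attaching the transaction to the context with `SetContextTxn` and passing that context down. A minimal sketch of that context-plumbing pattern, with a toy key and txn type rather than DefraDB's actual helpers:

```go
package main

import (
	"context"
	"fmt"
)

type txn struct{ id int }

type txnCtxKey struct{}

// setContextTxn mirrors db.SetContextTxn: the transaction rides along on
// the context instead of being bound to a collection handle.
func setContextTxn(ctx context.Context, t *txn) context.Context {
	return context.WithValue(ctx, txnCtxKey{}, t)
}

// mustGetContextTxn mirrors db.mustGetContextTxn: it panics if no
// transaction was attached.
func mustGetContextTxn(ctx context.Context) *txn {
	return ctx.Value(txnCtxKey{}).(*txn)
}

func main() {
	ctx := setContextTxn(context.Background(), &txn{id: 1})
	fmt.Println("txn id:", mustGetContextTxn(ctx).id)
}
```
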
diff --git a/db/lens.go b/db/lens.go
index d5240dad83..f21d084f88 100644
--- a/db/lens.go
+++ b/db/lens.go
@@ -18,12 +18,13 @@ import (
"github.com/sourcenetwork/defradb/client"
"github.com/sourcenetwork/defradb/core"
- "github.com/sourcenetwork/defradb/datastore"
"github.com/sourcenetwork/defradb/db/description"
"github.com/sourcenetwork/defradb/errors"
)
-func (db *db) setMigration(ctx context.Context, txn datastore.Txn, cfg client.LensConfig) error {
+func (db *db) setMigration(ctx context.Context, cfg client.LensConfig) error {
+ txn := mustGetContextTxn(ctx)
+
dstCols, err := description.GetCollectionsBySchemaVersionID(ctx, txn, cfg.DestinationSchemaVersionID)
if err != nil {
return err
@@ -34,7 +35,7 @@ func (db *db) setMigration(ctx context.Context, txn datastore.Txn, cfg client.Le
return err
}
- colSeq, err := db.getSequence(ctx, txn, core.CollectionIDSequenceKey{})
+ colSeq, err := db.getSequence(ctx, core.CollectionIDSequenceKey{})
if err != nil {
return err
}
@@ -42,7 +43,7 @@ func (db *db) setMigration(ctx context.Context, txn datastore.Txn, cfg client.Le
if len(sourceCols) == 0 {
// If no collections are found with the given [SourceSchemaVersionID], this migration must be from
// a collection/schema version that does not yet exist locally. We must now create it.
- colID, err := colSeq.next(ctx, txn)
+ colID, err := colSeq.next(ctx)
if err != nil {
return err
}
@@ -86,7 +87,7 @@ func (db *db) setMigration(ctx context.Context, txn datastore.Txn, cfg client.Le
if !isDstCollectionFound {
// If the destination collection was not found, we must create it. This can happen when setting a migration
// to a schema version that does not yet exist locally.
- colID, err := colSeq.next(ctx, txn)
+ colID, err := colSeq.next(ctx)
if err != nil {
return err
}
diff --git a/db/permission/check.go b/db/permission/check.go
new file mode 100644
index 0000000000..36dce10489
--- /dev/null
+++ b/db/permission/check.go
@@ -0,0 +1,92 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package permission
+
+import (
+ "context"
+
+ "github.com/sourcenetwork/immutable"
+
+ "github.com/sourcenetwork/defradb/acp"
+ acpIdentity "github.com/sourcenetwork/defradb/acp/identity"
+ "github.com/sourcenetwork/defradb/client"
+)
+
+// CheckAccessOfDocOnCollectionWithACP checks whether access to the target document
+// is valid with respect to the permission type and the specified collection.
+//
+// This function should only be called if acp is available, as access is
+// unrestricted when acp is not available (acp turned off).
+//
+// Since we know acp is enabled, there are two components to check in this function:
+// (1) whether the request is permissioned (has an identity),
+// (2) whether the collection is permissioned (has a policy).
+//
+// Access to the document is unrestricted if:
+// - (2) is false.
+// - The document is public (unregistered), whether the request is signed or not.
+func CheckAccessOfDocOnCollectionWithACP(
+ ctx context.Context,
+ identity immutable.Option[acpIdentity.Identity],
+ acpSystem acp.ACP,
+ collection client.Collection,
+ permission acp.DPIPermission,
+ docID string,
+) (bool, error) {
+	// Even if acp exists, if there is no policy on the collection (unpermissioned collection)
+	// we still have unrestricted access.
+ policyID, resourceName, hasPolicy := isPermissioned(collection)
+ if !hasPolicy {
+ return true, nil
+ }
+
+	// Now that we know acp is available and the collection is permissioned, before checking access
+	// with acp directly we need to make sure that the document is not public, as public documents
+	// are not registered with acp. We give unrestricted access to public documents, so it does not
+	// matter whether the request carries an identity at this stage of the check.
+ isRegistered, err := acpSystem.IsDocRegistered(
+ ctx,
+ policyID,
+ resourceName,
+ docID,
+ )
+ if err != nil {
+ return false, err
+ }
+
+ if !isRegistered {
+ // Unrestricted access as it is a public document.
+ return true, nil
+ }
+
+	// At this point, if the request carries no identity, it has no access, because:
+	// the collection has a policy on it, acp is enabled/available,
+	// and the document is not public (it is registered with acp).
+ if !identity.HasValue() {
+ return false, nil
+ }
+
+	// Now check with acp whether this identity has access or not.
+ hasAccess, err := acpSystem.CheckDocAccess(
+ ctx,
+ permission,
+ identity.Value().String(),
+ policyID,
+ resourceName,
+ docID,
+ )
+
+ if err != nil {
+ return false, err
+ }
+
+ return hasAccess, nil
+}
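
A hedged sketch of how a read path might call this new check. The helper name, the option-wrapped `acpSystem` parameter, and the `acp.ReadPermission` constant are assumptions for illustration, not confirmed wiring:

```go
// Hypothetical glue: gate a fetched docID behind the ACP read check.
func canReadDoc(
	ctx context.Context,
	ident immutable.Option[acpIdentity.Identity],
	acpSystem immutable.Option[acp.ACP],
	col client.Collection,
	docID string,
) (bool, error) {
	if !acpSystem.HasValue() {
		// No acp module configured: access is unrestricted.
		return true, nil
	}
	return permission.CheckAccessOfDocOnCollectionWithACP(
		ctx,
		ident,
		acpSystem.Value(),
		col,
		acp.ReadPermission, // assumed permission constant
		docID,
	)
}
```
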
diff --git a/db/permission/permission.go b/db/permission/permission.go
new file mode 100644
index 0000000000..3b365cba75
--- /dev/null
+++ b/db/permission/permission.go
@@ -0,0 +1,32 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package permission
+
+import (
+ "github.com/sourcenetwork/defradb/client"
+)
+
+// isPermissioned returns true if the collection has a policy, otherwise returns false.
+//
+// This tells us if access control is enabled for this collection or not.
+//
+// When there is a policy, in addition to returning true in the last return value,
+// the first returned value is the policyID and the second is the resource name.
+func isPermissioned(collection client.Collection) (string, string, bool) {
+ policy := collection.Definition().Description.Policy
+ if policy.HasValue() &&
+ policy.Value().ID != "" &&
+ policy.Value().ResourceName != "" {
+ return policy.Value().ID, policy.Value().ResourceName, true
+ }
+
+ return "", "", false
+}
diff --git a/db/permission/register.go b/db/permission/register.go
new file mode 100644
index 0000000000..a46e5eef34
--- /dev/null
+++ b/db/permission/register.go
@@ -0,0 +1,51 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package permission
+
+import (
+ "context"
+
+ "github.com/sourcenetwork/immutable"
+
+ "github.com/sourcenetwork/defradb/acp"
+ acpIdentity "github.com/sourcenetwork/defradb/acp/identity"
+ "github.com/sourcenetwork/defradb/client"
+)
+
+// RegisterDocOnCollectionWithACP handles the registration of the document with acp.
+//
+// Since acp will always exist when this is called, there are two components to consider:
+// (1) whether the request is permissioned (has an identity),
+// (2) whether the collection is permissioned (has a policy).
+//
+// The document is only registered if both (1) and (2) are true.
+//
+// Otherwise, nothing is registered with acp.
+func RegisterDocOnCollectionWithACP(
+ ctx context.Context,
+ identity immutable.Option[acpIdentity.Identity],
+ acpSystem acp.ACP,
+ collection client.Collection,
+ docID string,
+) error {
+ // An identity exists and the collection has a policy.
+ if policyID, resourceName, hasPolicy := isPermissioned(collection); hasPolicy && identity.HasValue() {
+ return acpSystem.RegisterDocObject(
+ ctx,
+ identity.Value().String(),
+ policyID,
+ resourceName,
+ docID,
+ )
+ }
+
+ return nil
+}
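
And a matching hedged sketch for the write path: after a document is persisted, register it with acp so later access checks apply. The surrounding variable names are assumptions; the no-op cases for unidentified creates and policy-less collections are handled inside the function itself:

```go
// Hypothetical create-path glue: register the new document with acp.
if acpSystem.HasValue() {
	err := permission.RegisterDocOnCollectionWithACP(
		ctx,
		identity,
		acpSystem.Value(),
		col,
		doc.ID().String(),
	)
	if err != nil {
		return err
	}
}
```
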
diff --git a/db/request.go b/db/request.go
index 69eabebd34..83a2fb09bb 100644
--- a/db/request.go
+++ b/db/request.go
@@ -14,12 +14,11 @@ import (
"context"
"github.com/sourcenetwork/defradb/client"
- "github.com/sourcenetwork/defradb/datastore"
"github.com/sourcenetwork/defradb/planner"
)
// execRequest executes a request against the database.
-func (db *db) execRequest(ctx context.Context, request string, txn datastore.Txn) *client.RequestResult {
+func (db *db) execRequest(ctx context.Context, request string) *client.RequestResult {
res := &client.RequestResult{}
ast, err := db.parser.BuildRequestAST(request)
if err != nil {
@@ -48,7 +47,15 @@ func (db *db) execRequest(ctx context.Context, request string, txn datastore.Txn
return res
}
- planner := planner.New(ctx, db.WithTxn(txn), txn)
+ txn := mustGetContextTxn(ctx)
+ identity := GetContextIdentity(ctx)
+ planner := planner.New(
+ ctx,
+ identity,
+ db.acp,
+ db,
+ txn,
+ )
results, err := planner.RunRequest(ctx, parsedRequest)
if err != nil {
diff --git a/db/schema.go b/db/schema.go
index a4582158f3..756c02f1ff 100644
--- a/db/schema.go
+++ b/db/schema.go
@@ -23,7 +23,6 @@ import (
"github.com/sourcenetwork/immutable"
"github.com/sourcenetwork/defradb/client"
- "github.com/sourcenetwork/defradb/datastore"
"github.com/sourcenetwork/defradb/db/description"
)
@@ -37,7 +36,6 @@ const (
// and creates the necessary collections, request types, etc.
func (db *db) addSchema(
ctx context.Context,
- txn datastore.Txn,
schemaString string,
) ([]client.CollectionDescription, error) {
newDefinitions, err := db.parser.ParseSDL(ctx, schemaString)
@@ -47,14 +45,20 @@ func (db *db) addSchema(
returnDescriptions := make([]client.CollectionDescription, len(newDefinitions))
for i, definition := range newDefinitions {
- col, err := db.createCollection(ctx, txn, definition)
+ // Only accept the schema if policy description is valid, otherwise reject the schema.
+ err := db.validateCollectionDefinitionPolicyDesc(ctx, definition.Description.Policy)
+ if err != nil {
+ return nil, err
+ }
+
+ col, err := db.createCollection(ctx, definition, newDefinitions)
if err != nil {
return nil, err
}
returnDescriptions[i] = col.Description()
}
- err = db.loadSchema(ctx, txn)
+ err = db.loadSchema(ctx)
if err != nil {
return nil, err
}
@@ -62,8 +66,10 @@ func (db *db) addSchema(
return returnDescriptions, nil
}
-func (db *db) loadSchema(ctx context.Context, txn datastore.Txn) error {
- definitions, err := db.getAllActiveDefinitions(ctx, txn)
+func (db *db) loadSchema(ctx context.Context) error {
+ txn := mustGetContextTxn(ctx)
+
+ definitions, err := db.getAllActiveDefinitions(ctx)
if err != nil {
return err
}
@@ -84,11 +90,12 @@ func (db *db) loadSchema(ctx context.Context, txn datastore.Txn) error {
// will be applied.
func (db *db) patchSchema(
ctx context.Context,
- txn datastore.Txn,
patchString string,
migration immutable.Option[model.Lens],
setAsDefaultVersion bool,
) error {
+ txn := mustGetContextTxn(ctx)
+
patch, err := jsonpatch.DecodePatch([]byte(patchString))
if err != nil {
return err
@@ -131,7 +138,6 @@ func (db *db) patchSchema(
for _, schema := range newSchemaByName {
err := db.updateSchema(
ctx,
- txn,
existingSchemaByName,
newSchemaByName,
schema,
@@ -143,7 +149,7 @@ func (db *db) patchSchema(
}
}
- return db.loadSchema(ctx, txn)
+ return db.loadSchema(ctx)
}
// substituteSchemaPatch handles any substitution of values that may be required before
@@ -170,10 +176,10 @@ func substituteSchemaPatch(
return nil, err
}
- path = strings.TrimPrefix(path, "/")
- splitPath := strings.Split(path, "/")
-
if value, hasValue := patchOperation["value"]; hasValue {
+ path = strings.TrimPrefix(path, "/")
+ splitPath := strings.Split(path, "/")
+
var newPatchValue immutable.Option[any]
var field map[string]any
isField := isField(splitPath)
@@ -223,40 +229,6 @@ func substituteSchemaPatch(
}
}
- if isField {
- if kind, isString := field["Kind"].(string); isString {
- substitute, schemaName, err := getSubstituteFieldKind(kind, schemaByName)
- if err != nil {
- return nil, err
- }
-
- field["Kind"] = substitute
- if schemaName != "" {
- if field["Schema"] != nil && field["Schema"] != schemaName {
- return nil, NewErrFieldKindDoesNotMatchFieldSchema(kind, field["Schema"].(string))
- }
- field["Schema"] = schemaName
- }
-
- newPatchValue = immutable.Some[any](field)
- }
- } else if isFieldKind(splitPath) {
- var kind any
- err = json.Unmarshal(*value, &kind)
- if err != nil {
- return nil, err
- }
-
- if kind, isString := kind.(string); isString {
- substitute, _, err := getSubstituteFieldKind(kind, schemaByName)
- if err != nil {
- return nil, err
- }
-
- newPatchValue = immutable.Some[any](substitute)
- }
- }
-
if newPatchValue.HasValue() {
substitute, err := json.Marshal(newPatchValue.Value())
if err != nil {
@@ -274,10 +246,9 @@ func substituteSchemaPatch(
func (db *db) getSchemaByVersionID(
ctx context.Context,
- txn datastore.Txn,
versionID string,
) (client.SchemaDescription, error) {
- schemas, err := db.getSchemas(ctx, txn, client.SchemaFetchOptions{ID: immutable.Some(versionID)})
+ schemas, err := db.getSchemas(ctx, client.SchemaFetchOptions{ID: immutable.Some(versionID)})
if err != nil {
return client.SchemaDescription{}, err
}
@@ -288,9 +259,10 @@ func (db *db) getSchemaByVersionID(
func (db *db) getSchemas(
ctx context.Context,
- txn datastore.Txn,
options client.SchemaFetchOptions,
) ([]client.SchemaDescription, error) {
+ txn := mustGetContextTxn(ctx)
+
schemas := []client.SchemaDescription{}
switch {
@@ -331,36 +303,6 @@ func (db *db) getSchemas(
return result, nil
}
-// getSubstituteFieldKind checks and attempts to get the underlying integer value for the given string
-// Field Kind value. It will return the value if one is found, else returns an [ErrFieldKindNotFound].
-//
-// If the value represents a foreign relation the collection name will also be returned.
-func getSubstituteFieldKind(
- kind string,
- schemaByName map[string]client.SchemaDescription,
-) (client.FieldKind, string, error) {
- substitute, substituteFound := client.FieldKindStringToEnumMapping[kind]
- if substituteFound {
- return substitute, "", nil
- } else {
- var collectionName string
- var substitute client.FieldKind
- if len(kind) > 0 && kind[0] == '[' && kind[len(kind)-1] == ']' {
- collectionName = kind[1 : len(kind)-1]
- substitute = client.FieldKind_FOREIGN_OBJECT_ARRAY
- } else {
- collectionName = kind
- substitute = client.FieldKind_FOREIGN_OBJECT
- }
-
- if _, substituteFound := schemaByName[collectionName]; substituteFound {
- return substitute, collectionName, nil
- }
-
- return 0, "", NewErrFieldKindNotFound(kind)
- }
-}
-
// isFieldOrInner returns true if the given path points to a SchemaFieldDescription or a property within it.
func isFieldOrInner(path []string) bool {
//nolint:goconst
@@ -372,13 +314,6 @@ func isField(path []string) bool {
return len(path) == 3 && path[fieldsPathIndex] == "Fields"
}
-// isFieldKind returns true if the given path points to a SchemaFieldDescription.Kind property.
-func isFieldKind(path []string) bool {
- return len(path) == 4 &&
- path[fieldIndexPathIndex+1] == "Kind" &&
- path[fieldsPathIndex] == "Fields"
-}
-
// containsLetter returns true if the string contains at least one unicode letter.
func containsLetter(s string) bool {
for _, r := range s {
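With `getSubstituteFieldKind` and `isFieldKind` removed above, `substituteSchemaPatch` no longer maps string kinds such as `"Int"` or `"[User]"` to enum values; whatever `Kind` a patch supplies is now carried through to the later schema-update machinery. A minimal sketch of the three-segment path shape that `isField` still matches, using a hypothetical `Users` schema:

```go
// Hypothetical patch, not from the repository. The path splits into
// ["Users", "Fields", "-"], the len(path) == 3 shape checked by isField.
// The string Kind is now passed through as-is rather than substituted
// into a FieldKind enum inside substituteSchemaPatch.
const addFieldPatch = `[
	{"op": "add", "path": "/Users/Fields/-", "value": {"Name": "points", "Kind": "Int"}}
]`
```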
diff --git a/db/sequence.go b/db/sequence.go
index 3c510ec78c..f39bdcfb65 100644
--- a/db/sequence.go
+++ b/db/sequence.go
@@ -17,7 +17,6 @@ import (
ds "github.com/ipfs/go-datastore"
"github.com/sourcenetwork/defradb/core"
- "github.com/sourcenetwork/defradb/datastore"
"github.com/sourcenetwork/defradb/errors"
)
@@ -26,15 +25,15 @@ type sequence struct {
val uint64
}
-func (db *db) getSequence(ctx context.Context, txn datastore.Txn, key core.Key) (*sequence, error) {
+func (db *db) getSequence(ctx context.Context, key core.Key) (*sequence, error) {
seq := &sequence{
key: key,
val: uint64(0),
}
- _, err := seq.get(ctx, txn)
+ _, err := seq.get(ctx)
if errors.Is(err, ds.ErrNotFound) {
- err = seq.update(ctx, txn)
+ err = seq.update(ctx)
if err != nil {
return nil, err
}
@@ -45,7 +44,9 @@ func (db *db) getSequence(ctx context.Context, txn datastore.Txn, key core.Key)
return seq, nil
}
-func (seq *sequence) get(ctx context.Context, txn datastore.Txn) (uint64, error) {
+func (seq *sequence) get(ctx context.Context) (uint64, error) {
+ txn := mustGetContextTxn(ctx)
+
val, err := txn.Systemstore().Get(ctx, seq.key.ToDS())
if err != nil {
return 0, err
@@ -55,7 +56,9 @@ func (seq *sequence) get(ctx context.Context, txn datastore.Txn) (uint64, error)
return seq.val, nil
}
-func (seq *sequence) update(ctx context.Context, txn datastore.Txn) error {
+func (seq *sequence) update(ctx context.Context) error {
+ txn := mustGetContextTxn(ctx)
+
var buf [8]byte
binary.BigEndian.PutUint64(buf[:], seq.val)
if err := txn.Systemstore().Put(ctx, seq.key.ToDS(), buf[:]); err != nil {
@@ -65,12 +68,12 @@ func (seq *sequence) update(ctx context.Context, txn datastore.Txn) error {
return nil
}
-func (seq *sequence) next(ctx context.Context, txn datastore.Txn) (uint64, error) {
- _, err := seq.get(ctx, txn)
+func (seq *sequence) next(ctx context.Context) (uint64, error) {
+ _, err := seq.get(ctx)
if err != nil {
return 0, err
}
seq.val++
- return seq.val, seq.update(ctx, txn)
+ return seq.val, seq.update(ctx)
}
diff --git a/db/store.go b/db/store.go
new file mode 100644
index 0000000000..1686b9af3e
--- /dev/null
+++ b/db/store.go
@@ -0,0 +1,271 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package db
+
+import (
+ "context"
+
+ "github.com/lens-vm/lens/host-go/config/model"
+
+ "github.com/sourcenetwork/immutable"
+
+ "github.com/sourcenetwork/defradb/client"
+)
+
+// ExecRequest executes a request against the database.
+func (db *db) ExecRequest(ctx context.Context, request string) *client.RequestResult {
+ ctx, txn, err := ensureContextTxn(ctx, db, false)
+ if err != nil {
+ res := &client.RequestResult{}
+ res.GQL.Errors = []error{err}
+ return res
+ }
+ defer txn.Discard(ctx)
+
+ res := db.execRequest(ctx, request)
+ if len(res.GQL.Errors) > 0 {
+ return res
+ }
+
+ if err := txn.Commit(ctx); err != nil {
+ res.GQL.Errors = []error{err}
+ return res
+ }
+
+ return res
+}
+
+// GetCollectionByName returns an existing collection within the database.
+func (db *db) GetCollectionByName(ctx context.Context, name string) (client.Collection, error) {
+ ctx, txn, err := ensureContextTxn(ctx, db, true)
+ if err != nil {
+ return nil, err
+ }
+ defer txn.Discard(ctx)
+
+ return db.getCollectionByName(ctx, name)
+}
+
+// GetCollections gets all the currently defined collections.
+func (db *db) GetCollections(
+ ctx context.Context,
+ options client.CollectionFetchOptions,
+) ([]client.Collection, error) {
+ ctx, txn, err := ensureContextTxn(ctx, db, true)
+ if err != nil {
+ return nil, err
+ }
+ defer txn.Discard(ctx)
+
+ return db.getCollections(ctx, options)
+}
+
+// GetSchemaByVersionID returns the schema description for the schema version of the
+// ID provided.
+//
+// Will return an error if it is not found.
+func (db *db) GetSchemaByVersionID(ctx context.Context, versionID string) (client.SchemaDescription, error) {
+ ctx, txn, err := ensureContextTxn(ctx, db, true)
+ if err != nil {
+ return client.SchemaDescription{}, err
+ }
+ defer txn.Discard(ctx)
+
+ return db.getSchemaByVersionID(ctx, versionID)
+}
+
+// GetSchemas returns all schema versions that currently exist within
+// this [Store].
+func (db *db) GetSchemas(
+ ctx context.Context,
+ options client.SchemaFetchOptions,
+) ([]client.SchemaDescription, error) {
+ ctx, txn, err := ensureContextTxn(ctx, db, true)
+ if err != nil {
+ return nil, err
+ }
+ defer txn.Discard(ctx)
+
+ return db.getSchemas(ctx, options)
+}
+
+// GetAllIndexes gets all the indexes in the database.
+func (db *db) GetAllIndexes(
+ ctx context.Context,
+) (map[client.CollectionName][]client.IndexDescription, error) {
+ ctx, txn, err := ensureContextTxn(ctx, db, true)
+ if err != nil {
+ return nil, err
+ }
+ defer txn.Discard(ctx)
+
+ return db.getAllIndexDescriptions(ctx)
+}
+
+// AddSchema takes the provided GQL schema in SDL format, and applies it to the database,
+// creating the necessary collections, request types, etc.
+//
+// All schema types provided must not exist prior to calling this, and they may not reference existing
+// types previously defined.
+func (db *db) AddSchema(ctx context.Context, schemaString string) ([]client.CollectionDescription, error) {
+ ctx, txn, err := ensureContextTxn(ctx, db, false)
+ if err != nil {
+ return nil, err
+ }
+ defer txn.Discard(ctx)
+
+ cols, err := db.addSchema(ctx, schemaString)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := txn.Commit(ctx); err != nil {
+ return nil, err
+ }
+ return cols, nil
+}
+
+// PatchSchema takes the given JSON patch string and applies it to the set of CollectionDescriptions
+// present in the database.
+//
+// It will also update the GQL types used by the query system. It will error and not apply any of the
+// requested, valid updates should the net result of the patch result in an invalid state. The
+// individual operations defined in the patch do not need to result in a valid state, only the net result
+// of the full patch.
+//
+// The collections (including the schema version ID) will only be updated if any changes have actually
+// been made, if the net result of the patch matches the current persisted description then no changes
+// will be applied.
+func (db *db) PatchSchema(
+ ctx context.Context,
+ patchString string,
+ migration immutable.Option[model.Lens],
+ setAsDefaultVersion bool,
+) error {
+ ctx, txn, err := ensureContextTxn(ctx, db, false)
+ if err != nil {
+ return err
+ }
+ defer txn.Discard(ctx)
+
+ err = db.patchSchema(ctx, patchString, migration, setAsDefaultVersion)
+ if err != nil {
+ return err
+ }
+
+ return txn.Commit(ctx)
+}
+
+func (db *db) PatchCollection(
+ ctx context.Context,
+ patchString string,
+) error {
+ ctx, txn, err := ensureContextTxn(ctx, db, false)
+ if err != nil {
+ return err
+ }
+ defer txn.Discard(ctx)
+
+ err = db.patchCollection(ctx, patchString)
+ if err != nil {
+ return err
+ }
+
+ return txn.Commit(ctx)
+}
+
+func (db *db) SetActiveSchemaVersion(ctx context.Context, schemaVersionID string) error {
+ ctx, txn, err := ensureContextTxn(ctx, db, false)
+ if err != nil {
+ return err
+ }
+ defer txn.Discard(ctx)
+
+ err = db.setActiveSchemaVersion(ctx, schemaVersionID)
+ if err != nil {
+ return err
+ }
+
+ return txn.Commit(ctx)
+}
+
+func (db *db) SetMigration(ctx context.Context, cfg client.LensConfig) error {
+ ctx, txn, err := ensureContextTxn(ctx, db, false)
+ if err != nil {
+ return err
+ }
+ defer txn.Discard(ctx)
+
+ err = db.setMigration(ctx, cfg)
+ if err != nil {
+ return err
+ }
+
+ return txn.Commit(ctx)
+}
+
+func (db *db) AddView(
+ ctx context.Context,
+ query string,
+ sdl string,
+ transform immutable.Option[model.Lens],
+) ([]client.CollectionDefinition, error) {
+ ctx, txn, err := ensureContextTxn(ctx, db, false)
+ if err != nil {
+ return nil, err
+ }
+ defer txn.Discard(ctx)
+
+ defs, err := db.addView(ctx, query, sdl, transform)
+ if err != nil {
+ return nil, err
+ }
+
+ err = txn.Commit(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ return defs, nil
+}
+
+// BasicImport imports a json dataset.
+// filepath must be accessible to the node.
+func (db *db) BasicImport(ctx context.Context, filepath string) error {
+ ctx, txn, err := ensureContextTxn(ctx, db, false)
+ if err != nil {
+ return err
+ }
+ defer txn.Discard(ctx)
+
+ err = db.basicImport(ctx, filepath)
+ if err != nil {
+ return err
+ }
+
+ return txn.Commit(ctx)
+}
+
+// BasicExport exports the current data or subset of data to file in json format.
+func (db *db) BasicExport(ctx context.Context, config *client.BackupConfig) error {
+ ctx, txn, err := ensureContextTxn(ctx, db, true)
+ if err != nil {
+ return err
+ }
+ defer txn.Discard(ctx)
+
+ err = db.basicExport(ctx, config)
+ if err != nil {
+ return err
+ }
+
+ return txn.Commit(ctx)
+}
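Every public method in the new `db/store.go` shares one shape: `ensureContextTxn` opens a transaction only if the caller has not already attached one, reads finish with the deferred `Discard`, and writes end with an explicit `Commit`. A hedged caller-side sketch of the explicit style that replaces the deleted `WithTxn` wrappers, assuming `database` implements `client.DB` and that committing inside a store call is a no-op when the transaction was supplied by the caller:

```go
import (
	"context"

	"github.com/sourcenetwork/defradb/client"
	"github.com/sourcenetwork/defradb/db"
)

func runWithExplicitTxn(ctx context.Context, database client.DB) error {
	txn, err := database.NewTxn(ctx, false)
	if err != nil {
		return err
	}
	defer txn.Discard(ctx)

	// Attach the transaction so both calls below run inside it.
	ctx = db.SetContextTxn(ctx, txn)

	if _, err := database.AddSchema(ctx, `type User { name: String }`); err != nil {
		return err
	}

	res := database.ExecRequest(ctx, `query { User { name } }`)
	if len(res.GQL.Errors) > 0 {
		return res.GQL.Errors[0]
	}

	// The caller, not the store method, decides when to commit.
	return txn.Commit(ctx)
}
```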
diff --git a/db/subscriptions.go b/db/subscriptions.go
index 2e7d2d4123..0d16074887 100644
--- a/db/subscriptions.go
+++ b/db/subscriptions.go
@@ -15,7 +15,6 @@ import (
"github.com/sourcenetwork/defradb/client"
"github.com/sourcenetwork/defradb/client/request"
- "github.com/sourcenetwork/defradb/datastore"
"github.com/sourcenetwork/defradb/events"
"github.com/sourcenetwork/defradb/planner"
)
@@ -55,24 +54,31 @@ func (db *db) handleSubscription(
for evt := range pub.Event() {
txn, err := db.NewTxn(ctx, false)
if err != nil {
- log.Error(ctx, err.Error())
+ log.ErrorContext(ctx, err.Error())
continue
}
- db.handleEvent(ctx, txn, pub, evt, r)
-
+ ctx := SetContextTxn(ctx, txn)
+ db.handleEvent(ctx, pub, evt, r)
txn.Discard(ctx)
}
}
func (db *db) handleEvent(
ctx context.Context,
- txn datastore.Txn,
pub *events.Publisher[events.Update],
evt events.Update,
r *request.ObjectSubscription,
) {
- p := planner.New(ctx, db.WithTxn(txn), txn)
+ txn := mustGetContextTxn(ctx)
+ identity := GetContextIdentity(ctx)
+ p := planner.New(
+ ctx,
+ identity,
+ db.acp,
+ db,
+ txn,
+ )
s := r.ToSelect(evt.DocID, evt.Cid.String())
diff --git a/db/txn_db.go b/db/txn_db.go
deleted file mode 100644
index f2fbe7cea3..0000000000
--- a/db/txn_db.go
+++ /dev/null
@@ -1,391 +0,0 @@
-// Copyright 2023 Democratized Data Foundation
-//
-// Use of this software is governed by the Business Source License
-// included in the file licenses/BSL.txt.
-//
-// As of the Change Date specified in that file, in accordance with
-// the Business Source License, use of this software will be governed
-// by the Apache License, Version 2.0, included in the file
-// licenses/APL.txt.
-
-package db
-
-import (
- "context"
-
- "github.com/lens-vm/lens/host-go/config/model"
- "github.com/sourcenetwork/immutable"
-
- "github.com/sourcenetwork/defradb/client"
- "github.com/sourcenetwork/defradb/datastore"
-)
-
-var _ client.DB = (*implicitTxnDB)(nil)
-var _ client.DB = (*explicitTxnDB)(nil)
-var _ client.Store = (*implicitTxnDB)(nil)
-var _ client.Store = (*explicitTxnDB)(nil)
-
-type implicitTxnDB struct {
- *db
-}
-
-type explicitTxnDB struct {
- *db
- txn datastore.Txn
- lensRegistry client.LensRegistry
-}
-
-// ExecRequest executes a request against the database.
-func (db *implicitTxnDB) ExecRequest(ctx context.Context, request string) *client.RequestResult {
- txn, err := db.NewTxn(ctx, false)
- if err != nil {
- res := &client.RequestResult{}
- res.GQL.Errors = []error{err}
- return res
- }
- defer txn.Discard(ctx)
-
- res := db.execRequest(ctx, request, txn)
- if len(res.GQL.Errors) > 0 {
- return res
- }
-
- if err := txn.Commit(ctx); err != nil {
- res.GQL.Errors = []error{err}
- return res
- }
-
- return res
-}
-
-// ExecRequest executes a transaction request against the database.
-func (db *explicitTxnDB) ExecRequest(
- ctx context.Context,
- request string,
-) *client.RequestResult {
- return db.execRequest(ctx, request, db.txn)
-}
-
-// GetCollectionByName returns an existing collection within the database.
-func (db *implicitTxnDB) GetCollectionByName(ctx context.Context, name string) (client.Collection, error) {
- txn, err := db.NewTxn(ctx, true)
- if err != nil {
- return nil, err
- }
- defer txn.Discard(ctx)
-
- return db.getCollectionByName(ctx, txn, name)
-}
-
-// GetCollectionByName returns an existing collection within the database.
-func (db *explicitTxnDB) GetCollectionByName(ctx context.Context, name string) (client.Collection, error) {
- col, err := db.getCollectionByName(ctx, db.txn, name)
- if err != nil {
- return nil, err
- }
-
- return col.WithTxn(db.txn), nil
-}
-
-// GetCollections gets all the currently defined collections.
-func (db *implicitTxnDB) GetCollections(
- ctx context.Context,
- options client.CollectionFetchOptions,
-) ([]client.Collection, error) {
- txn, err := db.NewTxn(ctx, true)
- if err != nil {
- return nil, err
- }
- defer txn.Discard(ctx)
-
- return db.getCollections(ctx, txn, options)
-}
-
-// GetCollections gets all the currently defined collections.
-func (db *explicitTxnDB) GetCollections(
- ctx context.Context,
- options client.CollectionFetchOptions,
-) ([]client.Collection, error) {
- cols, err := db.getCollections(ctx, db.txn, options)
- if err != nil {
- return nil, err
- }
-
- for i := range cols {
- cols[i] = cols[i].WithTxn(db.txn)
- }
-
- return cols, nil
-}
-
-// GetSchemaByVersionID returns the schema description for the schema version of the
-// ID provided.
-//
-// Will return an error if it is not found.
-func (db *implicitTxnDB) GetSchemaByVersionID(ctx context.Context, versionID string) (client.SchemaDescription, error) {
- txn, err := db.NewTxn(ctx, true)
- if err != nil {
- return client.SchemaDescription{}, err
- }
- defer txn.Discard(ctx)
-
- return db.getSchemaByVersionID(ctx, txn, versionID)
-}
-
-// GetSchemaByVersionID returns the schema description for the schema version of the
-// ID provided.
-//
-// Will return an error if it is not found.
-func (db *explicitTxnDB) GetSchemaByVersionID(ctx context.Context, versionID string) (client.SchemaDescription, error) {
- return db.getSchemaByVersionID(ctx, db.txn, versionID)
-}
-
-// GetSchemas returns all schema versions that currently exist within
-// this [Store].
-func (db *implicitTxnDB) GetSchemas(
- ctx context.Context,
- options client.SchemaFetchOptions,
-) ([]client.SchemaDescription, error) {
- txn, err := db.NewTxn(ctx, true)
- if err != nil {
- return nil, err
- }
- defer txn.Discard(ctx)
-
- return db.getSchemas(ctx, txn, options)
-}
-
-// GetSchemas returns all schema versions that currently exist within
-// this [Store].
-func (db *explicitTxnDB) GetSchemas(
- ctx context.Context,
- options client.SchemaFetchOptions,
-) ([]client.SchemaDescription, error) {
- return db.getSchemas(ctx, db.txn, options)
-}
-
-// GetAllIndexes gets all the indexes in the database.
-func (db *implicitTxnDB) GetAllIndexes(
- ctx context.Context,
-) (map[client.CollectionName][]client.IndexDescription, error) {
- txn, err := db.NewTxn(ctx, true)
- if err != nil {
- return nil, err
- }
- defer txn.Discard(ctx)
-
- return db.getAllIndexDescriptions(ctx, txn)
-}
-
-// GetAllIndexes gets all the indexes in the database.
-func (db *explicitTxnDB) GetAllIndexes(
- ctx context.Context,
-) (map[client.CollectionName][]client.IndexDescription, error) {
- return db.getAllIndexDescriptions(ctx, db.txn)
-}
-
-// AddSchema takes the provided GQL schema in SDL format, and applies it to the database,
-// creating the necessary collections, request types, etc.
-//
-// All schema types provided must not exist prior to calling this, and they may not reference existing
-// types previously defined.
-func (db *implicitTxnDB) AddSchema(ctx context.Context, schemaString string) ([]client.CollectionDescription, error) {
- txn, err := db.NewTxn(ctx, false)
- if err != nil {
- return nil, err
- }
- defer txn.Discard(ctx)
-
- cols, err := db.addSchema(ctx, txn, schemaString)
- if err != nil {
- return nil, err
- }
-
- if err := txn.Commit(ctx); err != nil {
- return nil, err
- }
- return cols, nil
-}
-
-// AddSchema takes the provided GQL schema in SDL format, and applies it to the database,
-// creating the necessary collections, request types, etc.
-//
-// All schema types provided must not exist prior to calling this, and they may not reference existing
-// types previously defined.
-func (db *explicitTxnDB) AddSchema(ctx context.Context, schemaString string) ([]client.CollectionDescription, error) {
- return db.addSchema(ctx, db.txn, schemaString)
-}
-
-// PatchSchema takes the given JSON patch string and applies it to the set of CollectionDescriptions
-// present in the database.
-//
-// It will also update the GQL types used by the query system. It will error and not apply any of the
-// requested, valid updates should the net result of the patch result in an invalid state. The
-// individual operations defined in the patch do not need to result in a valid state, only the net result
-// of the full patch.
-//
-// The collections (including the schema version ID) will only be updated if any changes have actually
-// been made, if the net result of the patch matches the current persisted description then no changes
-// will be applied.
-func (db *implicitTxnDB) PatchSchema(
- ctx context.Context,
- patchString string,
- migration immutable.Option[model.Lens],
- setAsDefaultVersion bool,
-) error {
- txn, err := db.NewTxn(ctx, false)
- if err != nil {
- return err
- }
- defer txn.Discard(ctx)
-
- err = db.patchSchema(ctx, txn, patchString, migration, setAsDefaultVersion)
- if err != nil {
- return err
- }
-
- return txn.Commit(ctx)
-}
-
-// PatchSchema takes the given JSON patch string and applies it to the set of CollectionDescriptions
-// present in the database.
-//
-// It will also update the GQL types used by the query system. It will error and not apply any of the
-// requested, valid updates should the net result of the patch result in an invalid state. The
-// individual operations defined in the patch do not need to result in a valid state, only the net result
-// of the full patch.
-//
-// The collections (including the schema version ID) will only be updated if any changes have actually
-// been made, if the net result of the patch matches the current persisted description then no changes
-// will be applied.
-func (db *explicitTxnDB) PatchSchema(
- ctx context.Context,
- patchString string,
- migration immutable.Option[model.Lens],
- setAsDefaultVersion bool,
-) error {
- return db.patchSchema(ctx, db.txn, patchString, migration, setAsDefaultVersion)
-}
-
-func (db *implicitTxnDB) SetActiveSchemaVersion(ctx context.Context, schemaVersionID string) error {
- txn, err := db.NewTxn(ctx, false)
- if err != nil {
- return err
- }
- defer txn.Discard(ctx)
-
- err = db.setActiveSchemaVersion(ctx, txn, schemaVersionID)
- if err != nil {
- return err
- }
-
- return txn.Commit(ctx)
-}
-
-func (db *explicitTxnDB) SetActiveSchemaVersion(ctx context.Context, schemaVersionID string) error {
- return db.setActiveSchemaVersion(ctx, db.txn, schemaVersionID)
-}
-
-func (db *implicitTxnDB) SetMigration(ctx context.Context, cfg client.LensConfig) error {
- txn, err := db.NewTxn(ctx, false)
- if err != nil {
- return err
- }
- defer txn.Discard(ctx)
-
- err = db.setMigration(ctx, txn, cfg)
- if err != nil {
- return err
- }
-
- return txn.Commit(ctx)
-}
-
-func (db *explicitTxnDB) SetMigration(ctx context.Context, cfg client.LensConfig) error {
- return db.setMigration(ctx, db.txn, cfg)
-}
-
-func (db *implicitTxnDB) AddView(
- ctx context.Context,
- query string,
- sdl string,
- transform immutable.Option[model.Lens],
-) ([]client.CollectionDefinition, error) {
- txn, err := db.NewTxn(ctx, false)
- if err != nil {
- return nil, err
- }
- defer txn.Discard(ctx)
-
- defs, err := db.addView(ctx, txn, query, sdl, transform)
- if err != nil {
- return nil, err
- }
-
- err = txn.Commit(ctx)
- if err != nil {
- return nil, err
- }
-
- return defs, nil
-}
-
-func (db *explicitTxnDB) AddView(
- ctx context.Context,
- query string,
- sdl string,
- transform immutable.Option[model.Lens],
-) ([]client.CollectionDefinition, error) {
- return db.addView(ctx, db.txn, query, sdl, transform)
-}
-
-// BasicImport imports a json dataset.
-// filepath must be accessible to the node.
-func (db *implicitTxnDB) BasicImport(ctx context.Context, filepath string) error {
- txn, err := db.NewTxn(ctx, false)
- if err != nil {
- return err
- }
- defer txn.Discard(ctx)
-
- err = db.basicImport(ctx, txn, filepath)
- if err != nil {
- return err
- }
-
- return txn.Commit(ctx)
-}
-
-// BasicImport imports a json dataset.
-// filepath must be accessible to the node.
-func (db *explicitTxnDB) BasicImport(ctx context.Context, filepath string) error {
- return db.basicImport(ctx, db.txn, filepath)
-}
-
-// BasicExport exports the current data or subset of data to file in json format.
-func (db *implicitTxnDB) BasicExport(ctx context.Context, config *client.BackupConfig) error {
- txn, err := db.NewTxn(ctx, true)
- if err != nil {
- return err
- }
- defer txn.Discard(ctx)
-
- err = db.basicExport(ctx, txn, config)
- if err != nil {
- return err
- }
-
- return txn.Commit(ctx)
-}
-
-// BasicExport exports the current data or subset of data to file in json format.
-func (db *explicitTxnDB) BasicExport(ctx context.Context, config *client.BackupConfig) error {
- return db.basicExport(ctx, db.txn, config)
-}
-
-// LensRegistry returns the LensRegistry in use by this database instance.
-//
-// It exposes several useful thread-safe migration related functions.
-func (db *explicitTxnDB) LensRegistry() client.LensRegistry {
- return db.lensRegistry
-}
diff --git a/db/view.go b/db/view.go
index ea57f94541..7cf040cbc5 100644
--- a/db/view.go
+++ b/db/view.go
@@ -20,17 +20,17 @@ import (
"github.com/sourcenetwork/defradb/client"
"github.com/sourcenetwork/defradb/client/request"
- "github.com/sourcenetwork/defradb/datastore"
"github.com/sourcenetwork/defradb/db/description"
)
func (db *db) addView(
ctx context.Context,
- txn datastore.Txn,
inputQuery string,
sdl string,
transform immutable.Option[model.Lens],
) ([]client.CollectionDefinition, error) {
+ txn := mustGetContextTxn(ctx)
+
// Wrap the given query as part of the GQL query object - this simplifies the syntax for users
// and ensures that we can't be given mutations. In the future this line should disappear along
// with all calls to the parser apart from `ParseSDL` when we implement the DQL stuff.
@@ -80,7 +80,7 @@ func (db *db) addView(
Schema: schema,
}
} else {
- col, err := db.createCollection(ctx, txn, definition)
+ col, err := db.createCollection(ctx, definition, newDefinitions)
if err != nil {
return nil, err
}
@@ -97,7 +97,7 @@ func (db *db) addView(
}
}
- err = db.loadSchema(ctx, txn)
+ err = db.loadSchema(ctx)
if err != nil {
return nil, err
}
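`addView` now resolves its transaction from the context as well, and `createCollection` receives the full set of new definitions. A short usage sketch against the public `AddView` from `db/store.go`; the query and SDL are invented for illustration and no Lens transform is supplied:

```go
import (
	"context"

	"github.com/lens-vm/lens/host-go/config/model"
	"github.com/sourcenetwork/immutable"

	"github.com/sourcenetwork/defradb/client"
)

func addUserView(ctx context.Context, database client.DB) ([]client.CollectionDefinition, error) {
	// addView wraps the inner query into a full GQL query object,
	// which also keeps mutations from being smuggled in.
	return database.AddView(
		ctx,
		`User { name }`,
		`type UserView { name: String }`,
		immutable.None[model.Lens](),
	)
}
```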
diff --git a/docs/cli/defradb.md b/docs/cli/defradb.md
index c89ce0f1aa..602206e575 100644
--- a/docs/cli/defradb.md
+++ b/docs/cli/defradb.md
@@ -14,11 +14,13 @@ Start a DefraDB node, interact with a local or remote node, and much more.
```
--allowed-origins stringArray List of origins to allow for CORS requests
-h, --help help for defradb
- --logformat string Log format to use. Options are csv, json (default "csv")
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format <name>,<key>=<val>,...;<name>,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
--max-txn-retries int Specify the maximum number of retries per transaction (default 5)
--no-p2p Disable the peer-to-peer network synchronization system
--p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
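The old `log*` flags are renamed to their `log-*` equivalents, the default format moves from csv to text, and overrides and source locations become available. A hypothetical invocation, using the override format restored above with made-up logger names:

```
defradb start --log-level info --log-format json \
  --log-overrides "net,level=debug;http,level=error"
```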
diff --git a/docs/cli/defradb_client.md b/docs/cli/defradb_client.md
index 30e8c804ee..302e171dd3 100644
--- a/docs/cli/defradb_client.md
+++ b/docs/cli/defradb_client.md
@@ -10,19 +10,22 @@ Execute queries, add schema types, obtain node info, etc.
### Options
```
- -h, --help help for client
- --tx uint Transaction ID
+ -h, --help help for client
+ -i, --identity string ACP Identity
+ --tx uint Transaction ID
```
### Options inherited from parent commands
```
--allowed-origins stringArray List of origins to allow for CORS requests
- --logformat string Log format to use. Options are csv, json (default "csv")
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format <name>,<key>=<val>,...;<name>,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
--max-txn-retries int Specify the maximum number of retries per transaction (default 5)
--no-p2p Disable the peer-to-peer network synchronization system
--p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
@@ -38,6 +41,7 @@ Execute queries, add schema types, obtain node info, etc.
### SEE ALSO
* [defradb](defradb.md) - DefraDB Edge Database
+* [defradb client acp](defradb_client_acp.md) - Interact with the access control system of a DefraDB node
* [defradb client backup](defradb_client_backup.md) - Interact with the backup utility
* [defradb client collection](defradb_client_collection.md) - Interact with a collection.
* [defradb client dump](defradb_client_dump.md) - Dump the contents of DefraDB node-side
diff --git a/docs/cli/defradb_client_acp.md b/docs/cli/defradb_client_acp.md
new file mode 100644
index 0000000000..d3f57ae230
--- /dev/null
+++ b/docs/cli/defradb_client_acp.md
@@ -0,0 +1,48 @@
+## defradb client acp
+
+Interact with the access control system of a DefraDB node
+
+### Synopsis
+
+Interact with the access control system of a DefraDB node
+
+Learn more about [ACP](/acp/README.md)
+
+
+
+### Options
+
+```
+ -h, --help help for acp
+```
+
+### Options inherited from parent commands
+
+```
+ --allowed-origins stringArray List of origins to allow for CORS requests
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format <name>,<key>=<val>,...;<name>,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
+ --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
+ --no-p2p Disable the peer-to-peer network synchronization system
+ --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
+ --peers stringArray List of peers to connect to
+ --privkeypath string Path to the private key for tls
+ --pubkeypath string Path to the public key for tls
+ --rootdir string Directory for persistent data (default: $HOME/.defradb)
+ --store string Specify the datastore to use (supported: badger, memory) (default "badger")
+ --tx uint Transaction ID
+ --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
+ --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
+```
+
+### SEE ALSO
+
+* [defradb client](defradb_client.md) - Interact with a DefraDB node
+* [defradb client acp policy](defradb_client_acp_policy.md) - Interact with the acp policy features of a DefraDB instance
+
diff --git a/docs/cli/defradb_client_acp_policy.md b/docs/cli/defradb_client_acp_policy.md
new file mode 100644
index 0000000000..2e659a0eb4
--- /dev/null
+++ b/docs/cli/defradb_client_acp_policy.md
@@ -0,0 +1,44 @@
+## defradb client acp policy
+
+Interact with the acp policy features of a DefraDB instance
+
+### Synopsis
+
+Interact with the acp policy features of a DefraDB instance
+
+### Options
+
+```
+ -h, --help help for policy
+```
+
+### Options inherited from parent commands
+
+```
+ --allowed-origins stringArray List of origins to allow for CORS requests
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format <name>,<key>=<val>,...;<name>,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
+ --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
+ --no-p2p Disable the peer-to-peer network synchronization system
+ --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
+ --peers stringArray List of peers to connect to
+ --privkeypath string Path to the private key for tls
+ --pubkeypath string Path to the public key for tls
+ --rootdir string Directory for persistent data (default: $HOME/.defradb)
+ --store string Specify the datastore to use (supported: badger, memory) (default "badger")
+ --tx uint Transaction ID
+ --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
+ --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
+```
+
+### SEE ALSO
+
+* [defradb client acp](defradb_client_acp.md) - Interact with the access control system of a DefraDB node
+* [defradb client acp policy add](defradb_client_acp_policy_add.md) - Add new policy
+
diff --git a/docs/cli/defradb_client_acp_policy_add.md b/docs/cli/defradb_client_acp_policy_add.md
new file mode 100644
index 0000000000..f426909323
--- /dev/null
+++ b/docs/cli/defradb_client_acp_policy_add.md
@@ -0,0 +1,91 @@
+## defradb client acp policy add
+
+Add new policy
+
+### Synopsis
+
+Add new policy
+
+Notes:
+ - Can not add a policy without specifying an identity.
+ - ACP must be available (i.e. ACP can not be disabled).
+ - A non-DPI policy will be accepted (will be registered with acp system).
+ - But only a valid DPI policyID & resource can be specified on a schema.
+ - DPI validation happens when attempting to add a schema with '@policy'.
+ - Learn more about [ACP & DPI Rules](/acp/README.md)
+
+Example: add from an argument string:
+ defradb client acp policy add -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j '
+description: A Valid DefraDB Policy Interface
+
+actor:
+ name: actor
+
+resources:
+ users:
+ permissions:
+ read:
+ expr: owner + reader
+ write:
+ expr: owner
+
+ relations:
+ owner:
+ types:
+ - actor
+ reader:
+ types:
+ - actor
+'
+
+Example: add from file:
+ defradb client acp policy add -i cosmos17r39df0hdcrgnmmw4mvu7qgk5nu888c7uvv37y -f policy.yml
+
+Example: add from file, verbose flags:
+ defradb client acp policy add --identity cosmos1kpw734v54g0t0d8tcye8ee5jc3gld0tcr2q473 --file policy.yml
+
+Example: add from stdin:
+ cat policy.yml | defradb client acp policy add -
+
+
+
+```
+defradb client acp policy add [-i --identity] [policy] [flags]
+```
+
+### Options
+
+```
+ -f, --file string File to load a policy from
+ -h, --help help for add
+```
+
+### Options inherited from parent commands
+
+```
+ --allowed-origins stringArray List of origins to allow for CORS requests
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format <name>,<key>=<val>,...;<name>,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
+ --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
+ --no-p2p Disable the peer-to-peer network synchronization system
+ --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
+ --peers stringArray List of peers to connect to
+ --privkeypath string Path to the private key for tls
+ --pubkeypath string Path to the public key for tls
+ --rootdir string Directory for persistent data (default: $HOME/.defradb)
+ --store string Specify the datastore to use (supported: badger, memory) (default "badger")
+ --tx uint Transaction ID
+ --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
+ --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
+```
+
+### SEE ALSO
+
+* [defradb client acp policy](defradb_client_acp_policy.md) - Interact with the acp policy features of a DefraDB instance
+
diff --git a/docs/cli/defradb_client_backup.md b/docs/cli/defradb_client_backup.md
index a7c7ae453b..ffa879365c 100644
--- a/docs/cli/defradb_client_backup.md
+++ b/docs/cli/defradb_client_backup.md
@@ -17,11 +17,14 @@ Currently only supports JSON format.
```
--allowed-origins stringArray List of origins to allow for CORS requests
- --logformat string Log format to use. Options are csv, json (default "csv")
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format <name>,<key>=<val>,...;<name>,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
--max-txn-retries int Specify the maximum number of retries per transaction (default 5)
--no-p2p Disable the peer-to-peer network synchronization system
--p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
diff --git a/docs/cli/defradb_client_backup_export.md b/docs/cli/defradb_client_backup_export.md
index 6992b120c6..fc05e8ee14 100644
--- a/docs/cli/defradb_client_backup_export.md
+++ b/docs/cli/defradb_client_backup_export.md
@@ -31,11 +31,14 @@ defradb client backup export [-c --collections | -p --pretty | -f --format] <output_path> [flags]
```
--allowed-origins stringArray List of origins to allow for CORS requests
- --logformat string Log format to use. Options are csv, json (default "csv")
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format <name>,<key>=<val>,...;<name>,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
--max-txn-retries int Specify the maximum number of retries per transaction (default 5)
--no-p2p Disable the peer-to-peer network synchronization system
--p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
diff --git a/docs/cli/defradb_client_backup_import.md b/docs/cli/defradb_client_backup_import.md
index ad2d3a1117..373f5be89c 100644
--- a/docs/cli/defradb_client_backup_import.md
+++ b/docs/cli/defradb_client_backup_import.md
@@ -23,11 +23,14 @@ defradb client backup import [flags]
```
--allowed-origins stringArray List of origins to allow for CORS requests
- --logformat string Log format to use. Options are csv, json (default "csv")
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format <name>,<key>=<val>,...;<name>,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
--max-txn-retries int Specify the maximum number of retries per transaction (default 5)
--no-p2p Disable the peer-to-peer network synchronization system
--p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
diff --git a/docs/cli/defradb_client_collection.md b/docs/cli/defradb_client_collection.md
index 593e2d01ee..59faa94f78 100644
--- a/docs/cli/defradb_client_collection.md
+++ b/docs/cli/defradb_client_collection.md
@@ -9,23 +9,26 @@ Create, read, update, and delete documents within a collection.
### Options
```
- --get-inactive Get inactive collections as well as active
- -h, --help help for collection
- --name string Collection name
- --schema string Collection schema Root
- --tx uint Transaction ID
- --version string Collection version ID
+ --get-inactive Get inactive collections as well as active
+ -h, --help help for collection
+ -i, --identity string ACP Identity
+ --name string Collection name
+ --schema string Collection schema Root
+ --tx uint Transaction ID
+ --version string Collection version ID
```
### Options inherited from parent commands
```
--allowed-origins stringArray List of origins to allow for CORS requests
- --logformat string Log format to use. Options are csv, json (default "csv")
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format <name>,<key>=<val>,...;<name>,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
--max-txn-retries int Specify the maximum number of retries per transaction (default 5)
--no-p2p Disable the peer-to-peer network synchronization system
--p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
@@ -46,5 +49,6 @@ Create, read, update, and delete documents within a collection.
* [defradb client collection describe](defradb_client_collection_describe.md) - View collection description.
* [defradb client collection docIDs](defradb_client_collection_docIDs.md) - List all document IDs (docIDs).
* [defradb client collection get](defradb_client_collection_get.md) - View document fields.
+* [defradb client collection patch](defradb_client_collection_patch.md) - Patch existing collection descriptions
* [defradb client collection update](defradb_client_collection_update.md) - Update documents by docID or filter.
diff --git a/docs/cli/defradb_client_collection_create.md b/docs/cli/defradb_client_collection_create.md
index 7c2cba7487..b565c2a547 100644
--- a/docs/cli/defradb_client_collection_create.md
+++ b/docs/cli/defradb_client_collection_create.md
@@ -6,21 +6,24 @@ Create a new document.
Create a new document.
-Example: create from string
+Example: create from string:
defradb client collection create --name User '{ "name": "Bob" }'
-Example: create multiple from string
+Example: create from string, with identity:
+ defradb client collection create -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name User '{ "name": "Bob" }'
+
+Example: create multiple from string:
defradb client collection create --name User '[{ "name": "Alice" }, { "name": "Bob" }]'
-Example: create from file
+Example: create from file:
defradb client collection create --name User -f document.json
-Example: create from stdin
+Example: create from stdin:
cat document.json | defradb client collection create --name User -
```
-defradb client collection create [flags]
+defradb client collection create [-i --identity] [flags]
```
### Options
@@ -35,11 +38,14 @@ defradb client collection create [flags]
```
--allowed-origins stringArray List of origins to allow for CORS requests
--get-inactive Get inactive collections as well as active
- --logformat string Log format to use. Options are csv, json (default "csv")
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format <name>,<key>=<val>,...;<name>,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
--max-txn-retries int Specify the maximum number of retries per transaction (default 5)
--name string Collection name
--no-p2p Disable the peer-to-peer network synchronization system
diff --git a/docs/cli/defradb_client_collection_delete.md b/docs/cli/defradb_client_collection_delete.md
index 33a5af4809..2bca8d7d8a 100644
--- a/docs/cli/defradb_client_collection_delete.md
+++ b/docs/cli/defradb_client_collection_delete.md
@@ -6,21 +6,24 @@ Delete documents by docID or filter.
Delete documents by docID or filter and lists the number of documents deleted.
-Example: delete by docID(s)
- defradb client collection delete --name User --docID bae-123,bae-456
+Example: delete by docID:
+ defradb client collection delete --name User --docID bae-123
-Example: delete by filter
+Example: delete by docID with identity:
+ defradb client collection delete -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name User --docID bae-123
+
+Example: delete by filter:
defradb client collection delete --name User --filter '{ "_gte": { "points": 100 } }'
```
-defradb client collection delete [--filter <filter> --docID <docID>] [flags]
+defradb client collection delete [-i --identity] [--filter <filter> --docID <docID>] [flags]
```
### Options
```
- --docID strings Document ID
+ --docID string Document ID
--filter string Document filter
-h, --help help for delete
```
@@ -30,11 +33,14 @@ defradb client collection delete [--filter <filter> --docID <docID>] [flags]
```
--allowed-origins stringArray List of origins to allow for CORS requests
--get-inactive Get inactive collections as well as active
- --logformat string Log format to use. Options are csv, json (default "csv")
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format <name>,<key>=<val>,...;<name>,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
--max-txn-retries int Specify the maximum number of retries per transaction (default 5)
--name string Collection name
--no-p2p Disable the peer-to-peer network synchronization system
diff --git a/docs/cli/defradb_client_collection_describe.md b/docs/cli/defradb_client_collection_describe.md
index 46e8623d6a..bea05a1321 100644
--- a/docs/cli/defradb_client_collection_describe.md
+++ b/docs/cli/defradb_client_collection_describe.md
@@ -37,11 +37,14 @@ defradb client collection describe [flags]
```
--allowed-origins stringArray List of origins to allow for CORS requests
- --logformat string Log format to use. Options are csv, json (default "csv")
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format <name>,<key>=<val>,...;<name>,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
--max-txn-retries int Specify the maximum number of retries per transaction (default 5)
--no-p2p Disable the peer-to-peer network synchronization system
--p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
diff --git a/docs/cli/defradb_client_collection_docIDs.md b/docs/cli/defradb_client_collection_docIDs.md
index c976d05417..1cf1a8444a 100644
--- a/docs/cli/defradb_client_collection_docIDs.md
+++ b/docs/cli/defradb_client_collection_docIDs.md
@@ -6,12 +6,15 @@ List all document IDs (docIDs).
List all document IDs (docIDs).
-Example:
+Example: list all docID(s):
defradb client collection docIDs --name User
+
+Example: list all docID(s), with an identity:
+ defradb client collection docIDs -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name User
```
-defradb client collection docIDs [flags]
+defradb client collection docIDs [-i --identity] [flags]
```
### Options
@@ -25,11 +28,14 @@ defradb client collection docIDs [flags]
```
--allowed-origins stringArray List of origins to allow for CORS requests
--get-inactive Get inactive collections as well as active
- --logformat string Log format to use. Options are csv, json (default "csv")
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format <name>,<key>=<val>,...;<name>,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
--max-txn-retries int Specify the maximum number of retries per transaction (default 5)
--name string Collection name
--no-p2p Disable the peer-to-peer network synchronization system
diff --git a/docs/cli/defradb_client_collection_get.md b/docs/cli/defradb_client_collection_get.md
index c2aeac17b3..7b80a2a54b 100644
--- a/docs/cli/defradb_client_collection_get.md
+++ b/docs/cli/defradb_client_collection_get.md
@@ -8,10 +8,13 @@ View document fields.
Example:
defradb client collection get --name User bae-123
+
+Example: get a private document, providing an identity:
+ defradb client collection get -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name User bae-123
```
-defradb client collection get [--show-deleted] [flags]
+defradb client collection get [-i --identity] [--show-deleted] [flags]
```
### Options
@@ -26,11 +29,14 @@ defradb client collection get [--show-deleted] [flags]
```
--allowed-origins stringArray List of origins to allow for CORS requests
--get-inactive Get inactive collections as well as active
- --logformat string Log format to use. Options are csv, json (default "csv")
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format <name>,<key>=<val>,...;<name>,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
--max-txn-retries int Specify the maximum number of retries per transaction (default 5)
--name string Collection name
--no-p2p Disable the peer-to-peer network synchronization system
diff --git a/docs/cli/defradb_client_collection_patch.md b/docs/cli/defradb_client_collection_patch.md
new file mode 100644
index 0000000000..c8540aa397
--- /dev/null
+++ b/docs/cli/defradb_client_collection_patch.md
@@ -0,0 +1,65 @@
+## defradb client collection patch
+
+Patch existing collection descriptions
+
+### Synopsis
+
+Patch existing collection descriptions.
+
+Uses JSON Patch to modify collection descriptions.
+
+Example: patch from an argument string:
+ defradb client collection patch '[{ "op": "add", "path": "...", "value": {...} }]'
+
+Example: patch from file:
+ defradb client collection patch -p patch.json
+
+Example: patch from stdin:
+ cat patch.json | defradb client collection patch -
+
+To learn more about the DefraDB GraphQL Schema Language, refer to https://docs.source.network.
+
+```
+defradb client collection patch [patch] [flags]
+```
+
+### Options
+
+```
+ -h, --help help for patch
+ -p, --patch-file string File to load a patch from
+```
+
+### Options inherited from parent commands
+
+```
+ --allowed-origins stringArray List of origins to allow for CORS requests
+ --get-inactive Get inactive collections as well as active
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format <name>,<key>=<val>,...;<name>,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
+ --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
+ --name string Collection name
+ --no-p2p Disable the peer-to-peer network synchronization system
+ --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
+ --peers stringArray List of peers to connect to
+ --privkeypath string Path to the private key for tls
+ --pubkeypath string Path to the public key for tls
+ --rootdir string Directory for persistent data (default: $HOME/.defradb)
+ --schema string Collection schema Root
+ --store string Specify the datastore to use (supported: badger, memory) (default "badger")
+ --tx uint Transaction ID
+ --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
+ --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
+ --version string Collection version ID
+```
+
+### SEE ALSO
+
+* [defradb client collection](defradb_client_collection.md) - Interact with a collection.
+
diff --git a/docs/cli/defradb_client_collection_update.md b/docs/cli/defradb_client_collection_update.md
index 1200cc5b3e..ab6b8999b0 100644
--- a/docs/cli/defradb_client_collection_update.md
+++ b/docs/cli/defradb_client_collection_update.md
@@ -6,26 +6,30 @@ Update documents by docID or filter.
Update documents by docID or filter.
-Example: update from string
+Example: update from string:
defradb client collection update --name User --docID bae-123 '{ "name": "Bob" }'
-Example: update by filter
+Example: update by filter:
defradb client collection update --name User \
--filter '{ "_gte": { "points": 100 } }' --updater '{ "verified": true }'
-Example: update by docIDs
+Example: update by docID:
defradb client collection update --name User \
- --docID bae-123,bae-456 --updater '{ "verified": true }'
+ --docID bae-123 --updater '{ "verified": true }'
+
+Example: update private docID, with identity:
+ defradb client collection update -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name User \
+ --docID bae-123 --updater '{ "verified": true }'
```
-defradb client collection update [--filter <filter> --docID <docID> --updater <updater>] [flags]
+defradb client collection update [-i --identity] [--filter <filter> --docID <docID> --updater <updater>] [flags]
```
### Options
```
- --docID strings Document ID
+ --docID string Document ID
--filter string Document filter
-h, --help help for update
--updater string Document updater
@@ -36,11 +40,14 @@ defradb client collection update [--filter <filter> --docID <docID> --updater <updater>] [flags]
```
--allowed-origins stringArray List of origins to allow for CORS requests
--get-inactive Get inactive collections as well as active
- --logformat string Log format to use. Options are csv, json (default "csv")
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format <name>,<key>=<val>,...;<name>,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
--max-txn-retries int Specify the maximum number of retries per transaction (default 5)
--name string Collection name
--no-p2p Disable the peer-to-peer network synchronization system
diff --git a/docs/cli/defradb_client_dump.md b/docs/cli/defradb_client_dump.md
index bc00e292b9..a819df1514 100644
--- a/docs/cli/defradb_client_dump.md
+++ b/docs/cli/defradb_client_dump.md
@@ -16,11 +16,14 @@ defradb client dump [flags]
```
--allowed-origins stringArray List of origins to allow for CORS requests
- --logformat string Log format to use. Options are csv, json (default "csv")
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format <name>,<key>=<val>,...;<name>,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
--max-txn-retries int Specify the maximum number of retries per transaction (default 5)
--no-p2p Disable the peer-to-peer network synchronization system
--p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
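The same logging-flag rename recurs across every command documented below: the run-together `--logformat`/`--loglevel`/`--lognocolor`/`--logoutput`/`--logtrace` flags become dashed `--log-*` equivalents, the csv format is dropped in favour of text, and `--log-source`, `--log-overrides`, and `--log-stacktrace` are added. A before/after sketch of an equivalent invocation:

```
# v0.10.x flag spelling:
defradb client dump --logformat json --loglevel debug --logoutput stderr

# v0.11.0 flag spelling:
defradb client dump --log-format json --log-level debug --log-output stderr
```
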
diff --git a/docs/cli/defradb_client_index.md b/docs/cli/defradb_client_index.md
index 0dab1de7fe..bb59a6373b 100644
--- a/docs/cli/defradb_client_index.md
+++ b/docs/cli/defradb_client_index.md
@@ -16,11 +16,14 @@ Manage (create, drop, or list) collection indexes on a DefraDB node.
```
--allowed-origins stringArray List of origins to allow for CORS requests
- --logformat string Log format to use. Options are csv, json (default "csv")
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format ,=,...;,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
--max-txn-retries int Specify the maximum number of retries per transaction (default 5)
--no-p2p Disable the peer-to-peer network synchronization system
--p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
diff --git a/docs/cli/defradb_client_index_create.md b/docs/cli/defradb_client_index_create.md
index cbdbbe1d50..8c365e348e 100644
--- a/docs/cli/defradb_client_index_create.md
+++ b/docs/cli/defradb_client_index_create.md
@@ -33,11 +33,14 @@ defradb client index create -c --collection --fields [-n --name ] [flags]
```
--allowed-origins stringArray List of origins to allow for CORS requests
- --logformat string Log format to use. Options are csv, json (default "csv")
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format ,=,...;,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
--max-txn-retries int Specify the maximum number of retries per transaction (default 5)
--no-p2p Disable the peer-to-peer network synchronization system
--p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
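A sketch of index creation with the flags shown in the usage line above; the collection, field, and index names are hypothetical:

```
# Create a named secondary index on the User collection's name field.
defradb client index create --collection User --fields name --name UserByName
```
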
diff --git a/docs/cli/defradb_client_index_drop.md b/docs/cli/defradb_client_index_drop.md
index bb9e6ec30a..03b206c6cb 100644
--- a/docs/cli/defradb_client_index_drop.md
+++ b/docs/cli/defradb_client_index_drop.md
@@ -25,11 +25,14 @@ defradb client index drop -c --collection -n --name [flags]
```
--allowed-origins stringArray List of origins to allow for CORS requests
- --logformat string Log format to use. Options are csv, json (default "csv")
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format ,=,...;,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
--max-txn-retries int Specify the maximum number of retries per transaction (default 5)
--no-p2p Disable the peer-to-peer network synchronization system
--p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
diff --git a/docs/cli/defradb_client_index_list.md b/docs/cli/defradb_client_index_list.md
index a2d7ca8dd0..3c776f73ac 100644
--- a/docs/cli/defradb_client_index_list.md
+++ b/docs/cli/defradb_client_index_list.md
@@ -27,11 +27,14 @@ defradb client index list [-c --collection ] [flags]
```
--allowed-origins stringArray List of origins to allow for CORS requests
- --logformat string Log format to use. Options are csv, json (default "csv")
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format ,=,...;,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
--max-txn-retries int Specify the maximum number of retries per transaction (default 5)
--no-p2p Disable the peer-to-peer network synchronization system
--p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
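And the matching drop and list invocations, again with hypothetical names:

```
# Drop the index created above, then list the indexes that remain.
defradb client index drop --collection User --name UserByName
defradb client index list --collection User
```
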
diff --git a/docs/cli/defradb_client_p2p.md b/docs/cli/defradb_client_p2p.md
index 171e2ab661..2506208717 100644
--- a/docs/cli/defradb_client_p2p.md
+++ b/docs/cli/defradb_client_p2p.md
@@ -16,11 +16,14 @@ Interact with the DefraDB P2P system
```
--allowed-origins stringArray List of origins to allow for CORS requests
- --logformat string Log format to use. Options are csv, json (default "csv")
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format ,=,...;,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
--max-txn-retries int Specify the maximum number of retries per transaction (default 5)
--no-p2p Disable the peer-to-peer network synchronization system
--p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
diff --git a/docs/cli/defradb_client_p2p_collection.md b/docs/cli/defradb_client_p2p_collection.md
index 11ace67212..a1de966445 100644
--- a/docs/cli/defradb_client_p2p_collection.md
+++ b/docs/cli/defradb_client_p2p_collection.md
@@ -17,11 +17,14 @@ The selected collections synchronize their events on the pubsub network.
```
--allowed-origins stringArray List of origins to allow for CORS requests
- --logformat string Log format to use. Options are csv, json (default "csv")
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format ,=,...;,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
--max-txn-retries int Specify the maximum number of retries per transaction (default 5)
--no-p2p Disable the peer-to-peer network synchronization system
--p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
diff --git a/docs/cli/defradb_client_p2p_collection_add.md b/docs/cli/defradb_client_p2p_collection_add.md
index c54f235a60..01bc79ca0f 100644
--- a/docs/cli/defradb_client_p2p_collection_add.md
+++ b/docs/cli/defradb_client_p2p_collection_add.md
@@ -28,11 +28,14 @@ defradb client p2p collection add [collectionIDs] [flags]
```
--allowed-origins stringArray List of origins to allow for CORS requests
- --logformat string Log format to use. Options are csv, json (default "csv")
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format ,=,...;,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
--max-txn-retries int Specify the maximum number of retries per transaction (default 5)
--no-p2p Disable the peer-to-peer network synchronization system
--p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
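A sketch of the add subcommand; the collection ID argument is a hypothetical value:

```
# Subscribe this node to pubsub updates for a specific collection.
defradb client p2p collection add bafkreigqmcqzkbg3elpxgtv6fzvtbns7g5ddam2g6aed577ssgg2mskdlm
```
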
diff --git a/docs/cli/defradb_client_p2p_collection_getall.md b/docs/cli/defradb_client_p2p_collection_getall.md
index 07c536d716..8d10944ad2 100644
--- a/docs/cli/defradb_client_p2p_collection_getall.md
+++ b/docs/cli/defradb_client_p2p_collection_getall.md
@@ -21,11 +21,14 @@ defradb client p2p collection getall [flags]
```
--allowed-origins stringArray List of origins to allow for CORS requests
- --logformat string Log format to use. Options are csv, json (default "csv")
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format ,=,...;,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
--max-txn-retries int Specify the maximum number of retries per transaction (default 5)
--no-p2p Disable the peer-to-peer network synchronization system
--p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
diff --git a/docs/cli/defradb_client_p2p_collection_remove.md b/docs/cli/defradb_client_p2p_collection_remove.md
index 5a8eb969b6..1cd6a14ee9 100644
--- a/docs/cli/defradb_client_p2p_collection_remove.md
+++ b/docs/cli/defradb_client_p2p_collection_remove.md
@@ -28,11 +28,14 @@ defradb client p2p collection remove [collectionIDs] [flags]
```
--allowed-origins stringArray List of origins to allow for CORS requests
- --logformat string Log format to use. Options are csv, json (default "csv")
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format ,=,...;,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
--max-txn-retries int Specify the maximum number of retries per transaction (default 5)
--no-p2p Disable the peer-to-peer network synchronization system
--p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
diff --git a/docs/cli/defradb_client_p2p_info.md b/docs/cli/defradb_client_p2p_info.md
index 27fdf7cb9b..385780ad3d 100644
--- a/docs/cli/defradb_client_p2p_info.md
+++ b/docs/cli/defradb_client_p2p_info.md
@@ -20,11 +20,14 @@ defradb client p2p info [flags]
```
--allowed-origins stringArray List of origins to allow for CORS requests
- --logformat string Log format to use. Options are csv, json (default "csv")
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format ,=,...;,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
--max-txn-retries int Specify the maximum number of retries per transaction (default 5)
--no-p2p Disable the peer-to-peer network synchronization system
--p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
diff --git a/docs/cli/defradb_client_p2p_replicator.md b/docs/cli/defradb_client_p2p_replicator.md
index 725845a726..b9d5b561c7 100644
--- a/docs/cli/defradb_client_p2p_replicator.md
+++ b/docs/cli/defradb_client_p2p_replicator.md
@@ -17,11 +17,14 @@ A replicator replicates one or all collection(s) from one node to another.
```
--allowed-origins stringArray List of origins to allow for CORS requests
- --logformat string Log format to use. Options are csv, json (default "csv")
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format ,=,...;,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
--max-txn-retries int Specify the maximum number of retries per transaction (default 5)
--no-p2p Disable the peer-to-peer network synchronization system
--p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
diff --git a/docs/cli/defradb_client_p2p_replicator_delete.md b/docs/cli/defradb_client_p2p_replicator_delete.md
index ef89979be6..93e5ff6d95 100644
--- a/docs/cli/defradb_client_p2p_replicator_delete.md
+++ b/docs/cli/defradb_client_p2p_replicator_delete.md
@@ -26,11 +26,14 @@ defradb client p2p replicator delete [-c, --collection] [flags]
```
--allowed-origins stringArray List of origins to allow for CORS requests
- --logformat string Log format to use. Options are csv, json (default "csv")
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format ,=,...;,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
--max-txn-retries int Specify the maximum number of retries per transaction (default 5)
--no-p2p Disable the peer-to-peer network synchronization system
--p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
diff --git a/docs/cli/defradb_client_p2p_replicator_getall.md b/docs/cli/defradb_client_p2p_replicator_getall.md
index 4d33b5243f..cc9cc1ed63 100644
--- a/docs/cli/defradb_client_p2p_replicator_getall.md
+++ b/docs/cli/defradb_client_p2p_replicator_getall.md
@@ -25,11 +25,14 @@ defradb client p2p replicator getall [flags]
```
--allowed-origins stringArray List of origins to allow for CORS requests
- --logformat string Log format to use. Options are csv, json (default "csv")
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format ,=,...;,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
--max-txn-retries int Specify the maximum number of retries per transaction (default 5)
--no-p2p Disable the peer-to-peer network synchronization system
--p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
diff --git a/docs/cli/defradb_client_p2p_replicator_set.md b/docs/cli/defradb_client_p2p_replicator_set.md
index 55654ded0f..4fbc980a7c 100644
--- a/docs/cli/defradb_client_p2p_replicator_set.md
+++ b/docs/cli/defradb_client_p2p_replicator_set.md
@@ -26,11 +26,14 @@ defradb client p2p replicator set [-c, --collection] [flags]
```
--allowed-origins stringArray List of origins to allow for CORS requests
- --logformat string Log format to use. Options are csv, json (default "csv")
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format ,=,...;,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
--max-txn-retries int Specify the maximum number of retries per transaction (default 5)
--no-p2p Disable the peer-to-peer network synchronization system
--p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
diff --git a/docs/cli/defradb_client_query.md b/docs/cli/defradb_client_query.md
index b23bf50553..493acca2d4 100644
--- a/docs/cli/defradb_client_query.md
+++ b/docs/cli/defradb_client_query.md
@@ -12,6 +12,9 @@ A query request can be sent as a single argument. Example command:
Do a query request from a file by using the '-f' flag. Example command:
defradb client query -f request.graphql
+Do a query request from a file with an identity. Example command:
+ defradb client query -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j -f request.graphql
+
Or it can be sent via stdin by using the '-' special syntax. Example command:
cat request.graphql | defradb client query -
@@ -21,7 +24,7 @@ with the database more conveniently.
To learn more about the DefraDB GraphQL Query Language, refer to https://docs.source.network.
```
-defradb client query [query request] [flags]
+defradb client query [-i --identity] [request] [flags]
```
### Options
@@ -35,11 +38,14 @@ defradb client query [query request] [flags]
```
--allowed-origins stringArray List of origins to allow for CORS requests
- --logformat string Log format to use. Options are csv, json (default "csv")
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format ,=,...;,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
--max-txn-retries int Specify the maximum number of retries per transaction (default 5)
--no-p2p Disable the peer-to-peer network synchronization system
--p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
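The documented stdin ('-') syntax composes with the new identity flag as well. A sketch, reusing the hypothetical identity from the example above:

```
# Send a query from stdin while acting as an ACP identity.
cat request.graphql | defradb client query -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j -
```
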
diff --git a/docs/cli/defradb_client_schema.md b/docs/cli/defradb_client_schema.md
index d37251c8db..2e144a89e6 100644
--- a/docs/cli/defradb_client_schema.md
+++ b/docs/cli/defradb_client_schema.md
@@ -16,11 +16,14 @@ Make changes, updates, or look for existing schema types.
```
--allowed-origins stringArray List of origins to allow for CORS requests
- --logformat string Log format to use. Options are csv, json (default "csv")
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format ,=,...;,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
--max-txn-retries int Specify the maximum number of retries per transaction (default 5)
--no-p2p Disable the peer-to-peer network synchronization system
--p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
diff --git a/docs/cli/defradb_client_schema_add.md b/docs/cli/defradb_client_schema_add.md
index e0ad675241..0ff3f683f4 100644
--- a/docs/cli/defradb_client_schema_add.md
+++ b/docs/cli/defradb_client_schema_add.md
@@ -6,6 +6,11 @@ Add new schema
Add new schema.
+A schema object with a linked '@policy(id:".." resource: "..")' directive will only be accepted if:
+ - ACP is available (i.e. ACP is not disabled).
+ - The specified resource adheres to the Document Access Control DPI Rules.
+ - Learn more about [ACP & DPI Rules](/acp/README.md)
+
Example: add from an argument string:
defradb client schema add 'type Foo { ... }'
@@ -32,11 +37,14 @@ defradb client schema add [schema] [flags]
```
--allowed-origins stringArray List of origins to allow for CORS requests
- --logformat string Log format to use. Options are csv, json (default "csv")
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format ,=,...;,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
--max-txn-retries int Specify the maximum number of retries per transaction (default 5)
--no-p2p Disable the peer-to-peer network synchronization system
--p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
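A sketch of adding a policy-linked schema, following the '@policy' directive form shown above; the policy id, type, and field names are hypothetical, and ACP must be enabled for the schema to be accepted:

```
# Add a schema whose resource is governed by a previously-added ACP policy.
defradb client schema add '
  type Users @policy(
    id: "dfe202ffb4f0fe9b46157c313213a3839e08a6f0a7c3aba55e4724cb49ffde8a"
    resource: "users"
  ) {
    name: String
    age: Int
  }
'
```
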
diff --git a/docs/cli/defradb_client_schema_describe.md b/docs/cli/defradb_client_schema_describe.md
index cd79cce3c1..0b28a1e64e 100644
--- a/docs/cli/defradb_client_schema_describe.md
+++ b/docs/cli/defradb_client_schema_describe.md
@@ -36,11 +36,14 @@ defradb client schema describe [flags]
```
--allowed-origins stringArray List of origins to allow for CORS requests
- --logformat string Log format to use. Options are csv, json (default "csv")
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format ,=,...;,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
--max-txn-retries int Specify the maximum number of retries per transaction (default 5)
--no-p2p Disable the peer-to-peer network synchronization system
--p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
diff --git a/docs/cli/defradb_client_schema_migration.md b/docs/cli/defradb_client_schema_migration.md
index b49420401c..c339763571 100644
--- a/docs/cli/defradb_client_schema_migration.md
+++ b/docs/cli/defradb_client_schema_migration.md
@@ -16,11 +16,14 @@ Make set or look for existing schema migrations on a DefraDB node.
```
--allowed-origins stringArray List of origins to allow for CORS requests
- --logformat string Log format to use. Options are csv, json (default "csv")
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format ,=,...;,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
--max-txn-retries int Specify the maximum number of retries per transaction (default 5)
--no-p2p Disable the peer-to-peer network synchronization system
--p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
diff --git a/docs/cli/defradb_client_schema_migration_down.md b/docs/cli/defradb_client_schema_migration_down.md
index 6172bf09b1..f741f5bec9 100644
--- a/docs/cli/defradb_client_schema_migration_down.md
+++ b/docs/cli/defradb_client_schema_migration_down.md
@@ -33,11 +33,14 @@ defradb client schema migration down --collection [flags]
```
--allowed-origins stringArray List of origins to allow for CORS requests
- --logformat string Log format to use. Options are csv, json (default "csv")
- --loglevel string Log level to use. Options are debug, info, error, fatal (default "info")
- --lognocolor Disable colored log output
- --logoutput string Log output path (default "stderr")
- --logtrace Include stacktrace in error and fatal logs
+ -i, --identity string ACP Identity
+ --log-format string Log format to use. Options are text or json (default "text")
+ --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
+ --log-no-color Disable colored log output
+ --log-output string Log output path. Options are stderr or stdout. (default "stderr")
+ --log-overrides string Logger config overrides. Format ,=,...;,...
+ --log-source Include source location in logs
+ --log-stacktrace Include stacktrace in error and fatal logs
--max-txn-retries int Specify the maximum number of retries per transaction (default 5)
--no-p2p Disable the peer-to-peer network synchronization system
--p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
diff --git a/docs/cli/defradb_client_schema_migration_get.md b/docs/cli/defradb_client_schema_migration_get.md
deleted file mode 100644
index 20ed8edb91..0000000000
--- a/docs/cli/defradb_client_schema_migration_get.md
+++ /dev/null
@@ -1,41 +0,0 @@
-## defradb client schema migration get
-
-Gets the schema migrations within DefraDB
-
-### Synopsis
-
-Gets the schema migrations within the local DefraDB node.
-
-Example:
- defradb client schema migration get'
-
-Learn more about the DefraDB GraphQL Schema Language on https://docs.source.network.
-
-```
-defradb client schema migration get [flags]
-```
-
-### Options
-
-```
- -h, --help help for get
-```
-
-### Options inherited from parent commands
-
-```
- --logformat string Log format to use. Options are csv, json (default "csv")
- --logger stringArray Override logger parameters. Usage: --logger ,level=,output=